Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
author    Alex Shi <alex.shi@linaro.org>
          Tue, 6 Dec 2016 05:01:29 +0000 (13:01 +0800)
committer Alex Shi <alex.shi@linaro.org>
          Tue, 6 Dec 2016 05:01:29 +0000 (13:01 +0800)
Conflicts:
keep the low scan frequency used in Android in net/wireless/scan.c

549 files changed:
Documentation/ABI/testing/sysfs-class-dual-role-usb [new file with mode: 0644]
Documentation/ABI/testing/sysfs-kernel-wakeup_reasons [new file with mode: 0644]
Documentation/android.txt [new file with mode: 0644]
Documentation/block/00-INDEX
Documentation/block/mmc-max-speed.txt [new file with mode: 0644]
Documentation/cpu-freq/governors.txt
Documentation/device-mapper/boot.txt [new file with mode: 0644]
Documentation/device-mapper/verity.txt
Documentation/devicetree/bindings/goldfish/audio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/goldfish/battery.txt [new file with mode: 0644]
Documentation/devicetree/bindings/goldfish/events.txt [new file with mode: 0644]
Documentation/devicetree/bindings/goldfish/tty.txt [new file with mode: 0644]
Documentation/devicetree/bindings/misc/memory-state-time.txt [new file with mode: 0644]
Documentation/devicetree/bindings/misc/ramoops.txt [new file with mode: 0644]
Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt [new file with mode: 0644]
Documentation/filesystems/proc.txt
Documentation/kernel-parameters.txt
Documentation/networking/ip-sysctl.txt
Documentation/ramoops.txt
Documentation/scheduler/sched-energy.txt [new file with mode: 0644]
Documentation/scheduler/sched-tune.txt [new file with mode: 0644]
Documentation/sync.txt [new file with mode: 0644]
Documentation/sysctl/kernel.txt
Documentation/sysctl/vm.txt
Documentation/trace/events-power.txt
Documentation/trace/ftrace.txt
android/configs/README [new file with mode: 0644]
android/configs/android-base.cfg [new file with mode: 0644]
android/configs/android-recommended.cfg [new file with mode: 0644]
arch/Kconfig
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/.gitignore
arch/arm/boot/Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/Makefile
arch/arm/common/Kconfig
arch/arm/common/Makefile
arch/arm/common/fiq_glue.S [new file with mode: 0644]
arch/arm/common/fiq_glue_setup.c [new file with mode: 0644]
arch/arm/configs/ranchu_defconfig [new file with mode: 0644]
arch/arm/include/asm/fiq_glue.h [new file with mode: 0644]
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/topology.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/kgdb.c
arch/arm/kernel/process.c
arch/arm/kernel/reboot.c
arch/arm/kernel/setup.c
arch/arm/kernel/topology.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mm/cache-v6.S
arch/arm/mm/fault.c
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/.gitignore
arch/arm64/boot/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/configs/ranchu64_defconfig [new file with mode: 0644]
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/topology.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/process.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/topology.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/mmap.c
arch/arm64/mm/mmu.c
arch/arm64/mm/proc-macros.S [deleted file]
arch/arm64/mm/proc.S
arch/arm64/xen/hypercall.S
arch/ia64/include/asm/early_ioremap.h [new file with mode: 0644]
arch/ia64/include/asm/io.h
arch/mips/mm/mmap.c
arch/powerpc/kernel/process.c
arch/powerpc/mm/mmap.c
arch/sparc/kernel/sys_sparc_64.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/configs/i386_ranchu_defconfig [new file with mode: 0644]
arch/x86/configs/x86_64_ranchu_defconfig [new file with mode: 0644]
arch/x86/include/asm/idle.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/process.c
arch/x86/mm/mmap.c
block/blk-core.c
block/genhd.c
block/partition-generic.c
drivers/android/Kconfig
drivers/android/binder.c
drivers/base/dd.c
drivers/base/power/main.c
drivers/base/power/power.h
drivers/base/power/wakeup.c
drivers/base/syscore.c
drivers/char/random.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_interactive.c [new file with mode: 0644]
drivers/cpuidle/cpuidle.c
drivers/cpuidle/governors/menu.c
drivers/dma-buf/fence.c
drivers/firmware/efi/Makefile
drivers/firmware/efi/arm-init.c [new file with mode: 0644]
drivers/firmware/efi/arm-runtime.c [new file with mode: 0644]
drivers/firmware/efi/efi.c
drivers/hid/hid-sensor-hub.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/keyboard/goldfish_events.c
drivers/input/keycombo.c [new file with mode: 0644]
drivers/input/keyreset.c [new file with mode: 0644]
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/gpio_axis.c [new file with mode: 0644]
drivers/input/misc/gpio_event.c [new file with mode: 0644]
drivers/input/misc/gpio_input.c [new file with mode: 0644]
drivers/input/misc/gpio_matrix.c [new file with mode: 0644]
drivers/input/misc/gpio_output.c [new file with mode: 0644]
drivers/input/misc/keychord.c [new file with mode: 0644]
drivers/md/Kconfig
drivers/md/Makefile
drivers/md/dm-android-verity.c [new file with mode: 0644]
drivers/md/dm-android-verity.h [new file with mode: 0644]
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-ioctl.c
drivers/md/dm-linear.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm-verity-fec.c [new file with mode: 0644]
drivers/md/dm-verity-fec.h [new file with mode: 0644]
drivers/md/dm-verity-target.c [new file with mode: 0644]
drivers/md/dm-verity.c [deleted file]
drivers/md/dm-verity.h [new file with mode: 0644]
drivers/media/tuners/tuner-xc2028.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/memory_state_time.c [new file with mode: 0644]
drivers/misc/uid_cputime.c [new file with mode: 0644]
drivers/mmc/card/Kconfig
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/core/Kconfig
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/core/sdio_io.c
drivers/mtd/nand/Kconfig
drivers/net/ppp/Kconfig
drivers/net/ppp/Makefile
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppolac.c [new file with mode: 0644]
drivers/net/ppp/pppopns.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/ti/wlcore/init.c
drivers/of/fdt.c
drivers/platform/Kconfig
drivers/platform/goldfish/Kconfig
drivers/platform/goldfish/Makefile
drivers/platform/goldfish/goldfish_pipe.c
drivers/platform/goldfish/goldfish_pipe.h [new file with mode: 0644]
drivers/platform/goldfish/goldfish_pipe_v2.c [new file with mode: 0644]
drivers/power/goldfish_battery.c
drivers/power/power_supply_sysfs.c
drivers/rtc/rtc-palmas.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/ashmem.c
drivers/staging/android/fiq_debugger/Kconfig [new file with mode: 0644]
drivers/staging/android/fiq_debugger/Makefile [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_arm.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_priv.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_watchdog.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_watchdog.h [new file with mode: 0644]
drivers/staging/android/ion/Kconfig
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_carveout_heap.c
drivers/staging/android/ion/ion_page_pool.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/android/lowmemorykiller.c
drivers/staging/android/sync.c
drivers/staging/android/trace/lowmemorykiller.h [new file with mode: 0644]
drivers/staging/android/uapi/ashmem.h
drivers/staging/goldfish/Kconfig
drivers/staging/goldfish/Makefile
drivers/staging/goldfish/goldfish_audio.c
drivers/staging/goldfish/goldfish_sync.c [new file with mode: 0644]
drivers/tty/goldfish.c
drivers/tty/serial/serial_core.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/Makefile
drivers/usb/gadget/function/f_accessory.c [new file with mode: 0644]
drivers/usb/gadget/function/f_audio_source.c [new file with mode: 0644]
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_mtp.c [new file with mode: 0644]
drivers/usb/gadget/function/f_mtp.h [new file with mode: 0644]
drivers/usb/gadget/function/f_ptp.c [new file with mode: 0644]
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/rndis.c
drivers/usb/gadget/function/rndis.h
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/function/u_ether.h
drivers/usb/phy/Kconfig
drivers/usb/phy/Makefile
drivers/usb/phy/class-dual-role.c [new file with mode: 0644]
drivers/usb/phy/otg-wakelock.c [new file with mode: 0644]
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/adf/Kconfig [new file with mode: 0644]
drivers/video/adf/Makefile [new file with mode: 0644]
drivers/video/adf/adf.c [new file with mode: 0644]
drivers/video/adf/adf.h [new file with mode: 0644]
drivers/video/adf/adf_client.c [new file with mode: 0644]
drivers/video/adf/adf_fbdev.c [new file with mode: 0644]
drivers/video/adf/adf_fops.c [new file with mode: 0644]
drivers/video/adf/adf_fops.h [new file with mode: 0644]
drivers/video/adf/adf_fops32.c [new file with mode: 0644]
drivers/video/adf/adf_fops32.h [new file with mode: 0644]
drivers/video/adf/adf_format.c [new file with mode: 0644]
drivers/video/adf/adf_memblock.c [new file with mode: 0644]
drivers/video/adf/adf_sysfs.c [new file with mode: 0644]
drivers/video/adf/adf_sysfs.h [new file with mode: 0644]
drivers/video/adf/adf_trace.h [new file with mode: 0644]
drivers/video/fbdev/goldfishfb.c
drivers/w1/masters/ds2482.c
fs/Kconfig
fs/Makefile
fs/binfmt_elf.c
fs/dcache.c
fs/eventpoll.c
fs/ext4/ext4.h
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/readpage.c
fs/f2fs/data.c
fs/f2fs/inline.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/mpage.c
fs/notify/inotify/inotify_user.c
fs/proc/base.c
fs/proc/kcore.c
fs/proc/task_mmu.c
fs/pstore/platform.c
fs/pstore/pmsg.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/sdcardfs/Kconfig [new file with mode: 0644]
fs/sdcardfs/Makefile [new file with mode: 0644]
fs/sdcardfs/dentry.c [new file with mode: 0644]
fs/sdcardfs/derived_perm.c [new file with mode: 0644]
fs/sdcardfs/file.c [new file with mode: 0644]
fs/sdcardfs/inode.c [new file with mode: 0644]
fs/sdcardfs/lookup.c [new file with mode: 0644]
fs/sdcardfs/main.c [new file with mode: 0644]
fs/sdcardfs/mmap.c [new file with mode: 0644]
fs/sdcardfs/multiuser.h [new file with mode: 0644]
fs/sdcardfs/packagelist.c [new file with mode: 0644]
fs/sdcardfs/sdcardfs.h [new file with mode: 0644]
fs/sdcardfs/super.c [new file with mode: 0644]
fs/select.c
fs/super.c
fs/userfaultfd.c
include/asm-generic/vmlinux.lds.h
include/linux/Kbuild [new file with mode: 0644]
include/linux/amba/mmci.h
include/linux/android_aid.h [new file with mode: 0644]
include/linux/blkdev.h
include/linux/cgroup_subsys.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/dcache.h
include/linux/device-mapper.h
include/linux/fence.h
include/linux/freezer.h
include/linux/gpio_event.h [new file with mode: 0644]
include/linux/hrtimer.h
include/linux/if_pppolac.h [new file with mode: 0644]
include/linux/if_pppopns.h [new file with mode: 0644]
include/linux/if_pppox.h
include/linux/inet_diag.h
include/linux/initramfs.h [new file with mode: 0644]
include/linux/ipv6.h
include/linux/keychord.h [new file with mode: 0644]
include/linux/keycombo.h [new file with mode: 0644]
include/linux/keyreset.h [new file with mode: 0644]
include/linux/memblock.h
include/linux/memory-state-time.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/core.h
include/linux/mmc/host.h
include/linux/mmc/pm.h
include/linux/mmc/sdio_func.h
include/linux/namei.h
include/linux/netfilter/xt_qtaguid.h [new file with mode: 0644]
include/linux/netfilter/xt_quota2.h [new file with mode: 0644]
include/linux/nmi.h
include/linux/of_fdt.h
include/linux/percpu-rwsem.h
include/linux/perf_event.h
include/linux/platform_data/ds2482.h [new file with mode: 0644]
include/linux/pm.h
include/linux/poll.h
include/linux/power_supply.h
include/linux/pstore.h
include/linux/pstore_ram.h
include/linux/random.h
include/linux/rcu_sync.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/sched_energy.h [new file with mode: 0644]
include/linux/serial_core.h
include/linux/sock_diag.h
include/linux/suspend.h
include/linux/thread_info.h
include/linux/timekeeping.h
include/linux/usb/class-dual-role.h [new file with mode: 0644]
include/linux/usb/composite.h
include/linux/usb/f_accessory.h [new file with mode: 0644]
include/linux/usb/f_mtp.h [new file with mode: 0644]
include/linux/vmstat.h
include/linux/wakelock.h [new file with mode: 0644]
include/linux/wakeup_reason.h [new file with mode: 0644]
include/linux/wlan_plat.h [new file with mode: 0644]
include/net/addrconf.h
include/net/fib_rules.h
include/net/flow.h
include/net/ip.h
include/net/ip6_route.h
include/net/route.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/trace/events/android_fs.h [new file with mode: 0644]
include/trace/events/android_fs_template.h [new file with mode: 0644]
include/trace/events/cpufreq_interactive.h [new file with mode: 0644]
include/trace/events/cpufreq_sched.h [new file with mode: 0644]
include/trace/events/gpu.h [new file with mode: 0644]
include/trace/events/mmc.h [new file with mode: 0644]
include/trace/events/power.h
include/trace/events/sched.h
include/uapi/linux/android/binder.h
include/uapi/linux/fib_rules.h
include/uapi/linux/fs.h
include/uapi/linux/fuse.h
include/uapi/linux/if_pppolac.h [new file with mode: 0644]
include/uapi/linux/if_pppopns.h [new file with mode: 0644]
include/uapi/linux/if_pppox.h
include/uapi/linux/inet_diag.h
include/uapi/linux/ipv6.h
include/uapi/linux/keychord.h [new file with mode: 0644]
include/uapi/linux/magic.h
include/uapi/linux/netfilter/xt_IDLETIMER.h
include/uapi/linux/netfilter/xt_socket.h
include/uapi/linux/prctl.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/sock_diag.h
include/uapi/linux/usb/f_accessory.h [new file with mode: 0644]
include/uapi/linux/usb/f_mtp.h [new file with mode: 0644]
include/uapi/video/adf.h [new file with mode: 0644]
include/video/adf.h [new file with mode: 0644]
include/video/adf_client.h [new file with mode: 0644]
include/video/adf_fbdev.h [new file with mode: 0644]
include/video/adf_format.h [new file with mode: 0644]
include/video/adf_memblock.h [new file with mode: 0644]
init/Kconfig
init/Makefile
init/do_mounts.c
init/do_mounts.h
init/do_mounts_dm.c [new file with mode: 0644]
init/initramfs.c
init/noinitramfs.c
kernel/audit.c
kernel/auditsc.c
kernel/cgroup.c
kernel/cpu.c
kernel/cpuset.c
kernel/debug/kdb/kdb_io.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/locking/percpu-rwsem.c
kernel/power/Kconfig
kernel/power/Makefile
kernel/power/process.c
kernel/power/suspend.c
kernel/power/wakeup_reason.c [new file with mode: 0644]
kernel/printk/printk.c
kernel/rcu/sync.c
kernel/sched/Makefile
kernel/sched/core.c
kernel/sched/cpufreq_sched.c [new file with mode: 0644]
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/energy.c [new file with mode: 0644]
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c
kernel/sched/tune.c [new file with mode: 0644]
kernel/sched/tune.h [new file with mode: 0644]
kernel/sched/walt.c [new file with mode: 0644]
kernel/sched/walt.h [new file with mode: 0644]
kernel/sys.c
kernel/sysctl.c
kernel/time/hrtimer.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/gpu-traces.c [new file with mode: 0644]
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_output.c
kernel/watchdog.c
lib/Kconfig.debug
lib/strncpy_from_user.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/page_alloc.c
mm/shmem.c
mm/usercopy.c
mm/vmstat.c
net/Kconfig
net/bluetooth/af_bluetooth.c
net/bridge/br_device.c
net/core/fib_rules.c
net/core/sock_diag.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_output.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/sysfs_net_ipv4.c [new file with mode: 0644]
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_diag.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/exthdrs_core.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_vti.c
net/ipv6/ipcomp6.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/mesh_hwmp.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nfnetlink.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_internal.h [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.h [new file with mode: 0644]
net/netfilter/xt_quota2.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/rfkill/Kconfig
net/rfkill/core.c
net/wireless/scan.c
scripts/Makefile.clean
scripts/Makefile.lib
scripts/Makefile.modinst
security/Kconfig
security/commoncap.c
security/lsm_audit.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/ss/avtab.c
tools/perf/util/evsel.c

diff --git a/Documentation/ABI/testing/sysfs-class-dual-role-usb b/Documentation/ABI/testing/sysfs-class-dual-role-usb
new file mode 100644 (file)
index 0000000..a900fd7
--- /dev/null
@@ -0,0 +1,71 @@
+What:          /sys/class/dual_role_usb/.../
+Date:          June 2015
+Contact:       Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               Provide a generic interface to monitor and change
+               the state of dual role usb ports. The name here
+               refers to the name mentioned in the
+               dual_role_phy_desc that is passed while registering
+               the dual_role_phy_instance through
+               devm_dual_role_instance_register.
+
+What:           /sys/class/dual_role_usb/.../supported_modes
+Date:           June 2015
+Contact:        Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               This is a static node, once initialized this
+               is not expected to change during runtime. "dfp"
+               refers to "downstream facing port" i.e. port can
+               only act as host. "ufp" refers to "upstream
+               facing port" i.e. port can only act as device.
+               "dfp ufp" refers to "dual role port" i.e. the port
+               can either be a host port or a device port.
+
+What:          /sys/class/dual_role_usb/.../mode
+Date:          June 2015
+Contact:       Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               The mode node refers to the current mode in which the
+               port is operating. "dfp" for host ports. "ufp" for device
+               ports and "none" when cable is not connected.
+
+               On devices where the USB mode is software-controllable,
+               userspace can change the mode by writing "dfp" or "ufp".
+               On devices where the USB mode is fixed in hardware,
+               this attribute is read-only.
+
+What:          /sys/class/dual_role_usb/.../power_role
+Date:          June 2015
+Contact:       Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               The power_role node mentions whether the port
+               is "sink"ing or "source"ing power. "none" if
+               the port is not connected.
+
+               On devices implementing USB Power Delivery,
+               userspace can control the power role by writing "sink" or
+               "source". On devices without USB-PD, this attribute is
+               read-only.
+
+What:          /sys/class/dual_role_usb/.../data_role
+Date:          June 2015
+Contact:       Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               The data_role node mentions whether the port
+               is acting as "host" or "device" for USB data connection.
+               "none" if there is no active data link.
+
+               On devices implementing USB Power Delivery, userspace
+               can control the data role by writing "host" or "device".
+               On devices without USB-PD, this attribute is read-only.
+
+What:          /sys/class/dual_role_usb/.../powers_vconn
+Date:          June 2015
+Contact:       Badhri Jagan Sridharan<badhri@google.com>
+Description:
+               The powers_vconn node mentions whether the port
+               is supplying power for VCONN pin.
+
+               On devices with software control of VCONN,
+               userspace can disable the power supply to VCONN by writing "n",
+               or enable the power supply by writing "y".
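
The attributes above are ordinary sysfs files, so they can be exercised from
userspace with plain file I/O. A minimal sketch follows; the port instance
name "otg_default" is an assumption (real names come from the
dual_role_phy_desc passed at registration), and writes only succeed on ports
whose mode or role is software-controllable.

/* Read the current mode of a dual-role port and request that it source
 * power. The instance name below is a placeholder.
 */
#include <stdio.h>

#define PORT "/sys/class/dual_role_usb/otg_default"

int main(void)
{
	char mode[16] = "";
	FILE *f = fopen(PORT "/mode", "r");

	if (!f) {
		perror("mode");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("current mode: %s", mode);
	fclose(f);

	/* Ask the port to source power; fails if the role is fixed. */
	f = fopen(PORT "/power_role", "w");
	if (f) {
		fputs("source", f);
		fclose(f);
	}
	return 0;
}
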
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644 (file)
index 0000000..acb19b9
--- /dev/null
@@ -0,0 +1,16 @@
+What:          /sys/kernel/wakeup_reasons/last_resume_reason
+Date:          February 2014
+Contact:       Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+               The /sys/kernel/wakeup_reasons/last_resume_reason is
+               used to report wakeup reasons after the system exits suspend.
+
+What:          /sys/kernel/wakeup_reasons/last_suspend_time
+Date:          March 2015
+Contact:       jinqian <jinqian@google.com>
+Description:
+               The /sys/kernel/wakeup_reasons/last_suspend_time is
+               used to report the time spent in the last suspend cycle. It
+               contains two numbers (in seconds) separated by a space. The
+               first number is the time spent in the suspend and resume
+               processes; the second is the time spent in the sleep state.
\ No newline at end of file
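
Since last_suspend_time is just two space-separated numbers, reading these
files from userspace is plain file I/O. A short sketch, assuming the two
values can be parsed as decimal seconds as described above:

/* Print the last wakeup reason and the time spent in the last suspend
 * cycle (time in suspend/resume processes, then time asleep).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char reason[256] = "";
	double in_suspend_resume = 0.0, asleep = 0.0;
	FILE *f;

	f = fopen("/sys/kernel/wakeup_reasons/last_resume_reason", "r");
	if (f) {
		if (fgets(reason, sizeof(reason), f)) {
			reason[strcspn(reason, "\n")] = '\0';
			printf("last resume reason: %s\n", reason);
		}
		fclose(f);
	}

	f = fopen("/sys/kernel/wakeup_reasons/last_suspend_time", "r");
	if (f) {
		if (fscanf(f, "%lf %lf", &in_suspend_resume, &asleep) == 2)
			printf("suspend/resume: %.3f s, asleep: %.3f s\n",
			       in_suspend_resume, asleep);
		fclose(f);
	}
	return 0;
}
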
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644 (file)
index 0000000..0f40a78
--- /dev/null
@@ -0,0 +1,121 @@
+                               =============
+                               A N D R O I D
+                               =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+PSTORE_CONSOLE
+PSTORE_RAM
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
index e840b47613f78f9f9efa6843cc90c997a36ccdce..bc5148757edb5db821cbdc42f20095a2097bcc13 100644 (file)
@@ -26,3 +26,9 @@ switching-sched.txt
        - Switching I/O schedulers at runtime
 writeback_cache_control.txt
        - Control of volatile write back caches
+mmc-max-speed.txt
+       - eMMC layer speed simulation, related to /sys/block/mmcblk*/
+          attributes:
+            max_read_speed
+            max_write_speed
+            cache_size
diff --git a/Documentation/block/mmc-max-speed.txt b/Documentation/block/mmc-max-speed.txt
new file mode 100644 (file)
index 0000000..3f052b9
--- /dev/null
@@ -0,0 +1,38 @@
+eMMC Block layer simulation speed controls in /sys/block/mmcblk*/
+===============================================
+
+Turned on with CONFIG_MMC_SIMULATE_MAX_SPEED which enables MMC device speed
+limiting. Used to test and simulate the behavior of the system when
+confronted with a slow MMC.
+
+Enables max_read_speed, max_write_speed and cache_size attributes and module
+default parameters to control the write or read maximum KB/second speed
+behaviors.
+
+NB: There is room for improving the algorithm for aspects tied directly to
+eMMC specific behavior. For instance, wear leveling and stalls from an
+exhausted erase pool. We would expect that, if there were a need to provide
+similar speed simulation controls for other types of block devices, aspects of
+their behavior would be modelled separately (e.g. head seek times, heat assist,
+shingling and rotational latency).
+
+/sys/block/mmcblk0/max_read_speed:
+
+Number of KB/second reads allowed to the block device. Used to test and
+simulate the behavior of the system when confronted with a slow reading MMC.
+Set to 0 or "off" to place no speed limit.
+
+/sys/block/mmcblk0/max_write_speed:
+
+Number of KB/second writes allowed to the block device. Used to test and
+simulate the behavior of the system when confronted with a slow writing MMC.
+Set to 0 or "off" to place no speed limit.
+
+/sys/block/mmcblk0/cache_size:
+
+Number of MB of high speed memory or high speed SLC cache expected on the
+eMMC device being simulated. Used to help simulate the write-back behavior
+more accurately. The assumption is the cache has no delay, but draws down
+in the background to the MLC/TLC primary store at the max_write_speed rate.
+Any write speed delays will show up when the cache is full, or when an I/O
+request to flush is issued.
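
These attributes accept a plain decimal KB/s value or the string "off", so a
test can toggle the simulated limits directly. A minimal sketch, assuming the
device is mmcblk0 and CONFIG_MMC_SIMULATE_MAX_SPEED is enabled:

/* Cap the simulated write speed to 2048 KB/s, then remove the cap.
 * Requires root; the device name mmcblk0 is an assumption.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_attr("/sys/block/mmcblk0/max_write_speed", "2048");
	/* ... run the I/O workload being measured here ... */
	write_attr("/sys/block/mmcblk0/max_write_speed", "off");
	return 0;
}
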
index c15aa75f52275b703e3da46faff2a95e2ccfc6e9..ac8a37e0c76a6893d71735cde83d7dcb8e1c979f 100644 (file)
@@ -28,6 +28,7 @@ Contents:
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -218,6 +219,90 @@ a decision on when to decrease the frequency while running in any
 speed. Load for frequency increase is still evaluated every
 sampling rate.
 
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value.  In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target.  The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds.  Colons can be used between the speeds and associated
+target loads for readability.  For example:
+
+   85 1000000:90 1700000:99
+
+targets a CPU load of 85% below 1GHz, 90% at or above 1GHz up to
+1.7GHz, and 99% at 1.7GHz and above.  If speeds are specified they
+must appear in ascending order.  Higher target load values are
+typically specified for higher speeds, that is, target load values
+also usually appear in ascending order.  The default is a target
+load of 90% for all speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified these must appear in
+ascending order.  Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle.  A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.)  Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum.  For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed.  For example, if timer_rate is 20000uS and
+timer_slack is 10000uS then timers will be deferred for up to 30msec
+when not at lowest speed.  A value of -1 means defer timers
+indefinitely at all speeds.  Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt
new file mode 100644 (file)
index 0000000..adcaad5
--- /dev/null
@@ -0,0 +1,42 @@
+Boot time creation of mapped devices
+===================================
+
+It is possible to configure a device mapper device to act as the root
+device for your system in two ways.
+
+The first is to build an initial ramdisk which boots to a minimal
+userspace which configures the device, then pivot_root(8)s into it.
+
+For simple device mapper configurations, it is possible to boot directly
+using the following kernel command line:
+
+dm="<name> <uuid> <ro>,table line 1,...,table line n"
+
+name = the name to associate with the device
+       after boot, udev, if used, will use that name to label
+       the device node.
+uuid = may be 'none' or the UUID desired for the device.
+ro = may be "ro" or "rw".  If "ro", the device and device table will be
+       marked read-only.
+
+Each table line may be as normal when using the dmsetup tool except for
+two variations:
+1. Any use of commas will be interpreted as a newline
+2. Quotation marks cannot be escaped and cannot be used without
+   terminating the dm= argument.
+
+Unless renamed by udev, the device node created will be dm-0 as the
+first minor number for the device-mapper is used during early creation.
+
+Example
+=======
+
+- Booting to a linear array made up of user-mode linux block devices:
+
+  dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \
+  root=/dev/dm-0
+
+Will boot to a rw dm-linear target of 8192 sectors split across two
+block devices identified by their major:minor numbers.  After boot, udev
+will rename this target to /dev/mapper/lroot (depending on the rules).
+No uuid was assigned.
index e15bc1a0fb98ab23563681210cc6ed1865234816..89fd8f9a259f69b9c9423da9bb16771ed0596cad 100644 (file)
@@ -18,11 +18,11 @@ Construction Parameters
 
     0 is the original format used in the Chromium OS.
       The salt is appended when hashing, digests are stored continuously and
-      the rest of the block is padded with zeros.
+      the rest of the block is padded with zeroes.
 
     1 is the current format that should be used for new devices.
       The salt is prepended when hashing and each digest is
-      padded with zeros to the power of two.
+      padded with zeroes to the power of two.
 
 <dev>
     This is the device containing data, the integrity of which needs to be
@@ -79,6 +79,37 @@ restart_on_corruption
     not compatible with ignore_corruption and requires user space support to
     avoid restart loops.
 
+ignore_zero_blocks
+    Do not verify blocks that are expected to contain zeroes and always return
+    zeroes instead. This may be useful if the partition contains unused blocks
+    that are not guaranteed to contain zeroes.
+
+use_fec_from_device <fec_dev>
+    Use forward error correction (FEC) to recover from corruption if hash
+    verification fails. Use encoding data from the specified device. This
+    may be the same device where data and hash blocks reside, in which case
+    fec_start must be outside data and hash areas.
+
+    If the encoding data covers additional metadata, it must be accessible
+    on the hash device after the hash blocks.
+
+    Note: block sizes for data and hash devices must match. Also, if the
+    verity <dev> is encrypted the <fec_dev> should be too.
+
+fec_roots <num>
+    Number of generator roots. This equals the number of parity bytes in
+    the encoding data. For example, in RS(M, N) encoding, the number of roots
+    is M-N.
+
+fec_blocks <num>
+    The number of encoding data blocks on the FEC device. The block size for
+    the FEC device is <data_block_size>.
+
+fec_start <offset>
+    This is the offset, in <data_block_size> blocks, from the start of the
+    FEC device to the beginning of the encoding data.
+
+
 Theory of operation
 ===================
 
@@ -98,6 +129,11 @@ per-block basis. This allows for a lightweight hash computation on first read
 into the page cache. Block hashes are stored linearly, aligned to the nearest
 block size.
 
+If forward error correction (FEC) support is enabled any recovery of
+corrupted data will be verified using the cryptographic hash of the
+corresponding data. This is why combining error correction with
+integrity checking is essential.
+
 Hash Tree
 ---------
 
diff --git a/Documentation/devicetree/bindings/goldfish/audio.txt b/Documentation/devicetree/bindings/goldfish/audio.txt
new file mode 100644 (file)
index 0000000..d043fda
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish Audio
+
+Android goldfish audio device generated by android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-audio" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       goldfish_audio@9030000 {
+               compatible = "google,goldfish-audio";
+               reg = <0x9030000 0x100>;
+               interrupts = <0x4>;
+       };
diff --git a/Documentation/devicetree/bindings/goldfish/battery.txt b/Documentation/devicetree/bindings/goldfish/battery.txt
new file mode 100644 (file)
index 0000000..4fb6139
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish Battery
+
+Android goldfish battery device generated by android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-battery" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       goldfish_battery@9020000 {
+               compatible = "google,goldfish-battery";
+               reg = <0x9020000 0x1000>;
+               interrupts = <0x3>;
+       };
diff --git a/Documentation/devicetree/bindings/goldfish/events.txt b/Documentation/devicetree/bindings/goldfish/events.txt
new file mode 100644 (file)
index 0000000..5babf46
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish Events Keypad
+
+Android goldfish events keypad device generated by android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-events-keypad" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       goldfish-events@9040000 {
+               compatible = "google,goldfish-events-keypad";
+               reg = <0x9040000 0x1000>;
+               interrupts = <0x5>;
+       };
diff --git a/Documentation/devicetree/bindings/goldfish/tty.txt b/Documentation/devicetree/bindings/goldfish/tty.txt
new file mode 100644 (file)
index 0000000..8264827
--- /dev/null
@@ -0,0 +1,17 @@
+Android Goldfish TTY
+
+Android goldfish tty device generated by android emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-tty" to match emulator
+- reg        : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example:
+
+       goldfish_tty@1f004000 {
+               compatible = "google,goldfish-tty";
+               reg = <0x1f004000 0x1000>;
+               interrupts = <0xc>;
+       };
diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt
new file mode 100644 (file)
index 0000000..c99a506
--- /dev/null
@@ -0,0 +1,8 @@
+Memory bandwidth and frequency state tracking
+
+Required properties:
+- compatible : should be:
+       "memory-state-time"
+- freq-tbl: Should contain entries with each frequency in Hz.
+- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps.
+       Must match the framework power_profile.xml for the device.
diff --git a/Documentation/devicetree/bindings/misc/ramoops.txt b/Documentation/devicetree/bindings/misc/ramoops.txt
new file mode 100644 (file)
index 0000000..5a475fa
--- /dev/null
@@ -0,0 +1,43 @@
+Ramoops oops/panic logger
+=========================
+
+ramoops provides persistent RAM storage for oops and panics, so they can be
+recovered after a reboot.
+
+Parts of this storage may be set aside for other persistent log buffers, such
+as kernel log messages, or for optional ECC error-correction data.  The total
+size of these optional buffers must fit in the reserved region.
+
+Any remaining space will be used for a circular buffer of oops and panic
+records.  These records have a configurable size, with a size of 0 indicating
+that they should be disabled.
+
+
+Required properties:
+
+- compatible: must be "ramoops"
+
+- memory-region: phandle to a region of memory that is preserved between reboots
+
+
+Optional properties:
+
+- ecc-size: enables ECC support and specifies ECC buffer size in bytes
+  (defaults to no ECC)
+
+- record-size: maximum size in bytes of each dump done on oops/panic
+  (defaults to 0)
+
+- console-size: size in bytes of log buffer reserved for kernel messages
+  (defaults to 0)
+
+- ftrace-size: size in bytes of log buffer reserved for function tracing and
+  profiling (defaults to 0)
+
+- pmsg-size: size in bytes of log buffer reserved for userspace messages
+  (defaults to 0)
+
+- unbuffered: if present, use unbuffered mappings to map the reserved region
+  (defaults to buffered mappings)
+
+- no-dump-oops: if present, only dump panics (defaults to panics and oops)
diff --git a/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt
new file mode 100644 (file)
index 0000000..11216f0
--- /dev/null
@@ -0,0 +1,360 @@
+===========================================================
+Energy cost bindings for Energy Aware Scheduling
+===========================================================
+
+===========================================================
+1 - Introduction
+===========================================================
+
+This note specifies bindings required for energy-aware scheduling
+(EAS)[1]. Historically, the scheduler's primary objective has been
+performance.  EAS aims to provide an alternative objective - energy
+efficiency. EAS relies on a simple platform energy cost model to
+guide scheduling decisions.  The model only considers the CPU
+subsystem.
+
+This note is aligned with the definition of the layout of physical
+CPUs in the system as described in the ARM topology binding
+description [2]. The concept is applicable to any system so long as
+the cost model data is provided for those processing elements in
+that system's topology that EAS is required to service.
+
+Processing elements refer to hardware threads, CPUs and clusters of
+related CPUs in increasing order of hierarchy.
+
+EAS requires two key cost metrics - busy costs and idle costs. Busy
+costs consist of a list of compute capacities for the processing
+element in question and the corresponding power consumption at that
+capacity.  Idle costs consist of a list of power consumption values
+for each idle state [C-state] that the processing element supports.
+For a detailed description of these metrics, their derivation and
+their use see [3].
+
+These cost metrics are required for processing elements in all
+scheduling domain levels that EAS is required to service.
+
+===========================================================
+2 - energy-costs node
+===========================================================
+
+Energy costs for the processing elements in scheduling domains that
+EAS is required to service are defined in the energy-costs node
+which acts as a container for the actual per processing element cost
+nodes. A single energy-costs node is required for a given system.
+
+- energy-costs node
+
+       Usage: Required
+
+       Description: The energy-costs node is a container node and
+       its sub-nodes describe costs for each processing element at
+       all scheduling domain levels that EAS is required to
+       service.
+
+       Node name must be "energy-costs".
+
+       The energy-costs node's parent node must be the cpus node.
+
+       The energy-costs node's child nodes can be:
+
+       - one or more cost nodes.
+
+       Any other configuration is considered invalid.
+
+The energy-costs node can only contain a single type of child node
+whose bindings are described in paragraph 4.
+
+===========================================================
+3 - energy-costs node child nodes naming convention
+===========================================================
+
+energy-costs child nodes must follow a naming convention where the
+node name must be "thread-costN", "core-costN", "cluster-costN"
+depending on whether the costs in the node are for a thread, core or
+cluster.  N (where N = {0, 1, ...}) is the node number and has no
+bearing on the OS' logical thread, core or cluster index.
+
+===========================================================
+4 - cost node bindings
+===========================================================
+
+Bindings for cost nodes are defined as follows:
+
+- cluster-cost node
+
+       Description: must be declared within an energy-costs node. A
+       system can contain multiple clusters and each cluster
+       serviced by EAS must have a corresponding cluster-cost
+       node.
+
+       The cluster-cost node name must be "cluster-costN" as
+       described in 3 above.
+
+       A cluster-cost node must be a leaf node with no children.
+
+       Properties for cluster-cost nodes are described in paragraph
+       5 below.
+
+       Any other configuration is considered invalid.
+
+- core-cost node
+
+       Description: must be declared within an energy-costs node. A
+       system can contain multiple cores and each core serviced by
+       EAS must have a corresponding core-cost node.
+
+       The core-cost node name must be "core-costN" as described in
+       3 above.
+
+       A core-cost node must be a leaf node with no children.
+
+       Properties for core-cost nodes are described in paragraph
+       5 below.
+
+       Any other configuration is considered invalid.
+
+- thread-cost node
+
+       Description: must be declared within an energy-costs node. A
+       system can contain cores with multiple hardware threads and
+       each thread serviced by EAS must have a corresponding
+       thread-cost node.
+
+       The thread-cost node name must be "thread-costN" as described
+       in 3 above.
+
+       A thread-cost node must be a leaf node with no children.
+
+       Properties for thread-cost nodes are described in paragraph
+       5 below.
+
+       Any other configuration is considered invalid.
+
+===========================================================
+5 - Cost node properties
+==========================================================
+
+All cost node types must have only the following properties:
+
+- busy-cost-data
+
+       Usage: required
+       Value type: An array of 2-item tuples. Each item is of type
+       u32.
+       Definition: The first item in the tuple is the capacity
+       value as described in [3]. The second item in the tuple is
+       the energy cost value as described in [3].
+
+- idle-cost-data
+
+       Usage: required
+       Value type: An array of 1-item tuples. The item is of type
+       u32.
+       Definition: The item in the tuple is the energy cost value
+       as described in [3].
+
+===========================================================
+6 - Extensions to the cpu node
+===========================================================
+
+The cpu node is extended with a property that establishes the
+connection between the processing element represented by the cpu
+node and the cost-nodes associated with this processing element.
+
+The connection is expressed in line with the topological hierarchy
+that this processing element belongs to starting with the level in
+the hierarchy that this processing element itself belongs to through
+to the highest level that EAS is required to service.  The
+connection cannot be sparse and must be contiguous from the
+processing element's level through to the highest desired level. The
+highest desired level must be the same for all processing elements.
+
+Example: Given that a cpu node may represent a thread that is a part
+of a core, this property may contain multiple elements which
+associate the thread with cost nodes describing the costs for the
+thread itself, the core the thread belongs to, the cluster the core
+belongs to and so on. The elements must be ordered from the lowest
+level nodes to the highest desired level that EAS must service. The
+highest desired level must be the same for all cpu nodes. The
+elements must not be sparse: there must be elements for the current
+thread, the next level of hierarchy (core) and so on without any
+'holes'.
+
+Example: Given that a cpu node may represent a core that is a part
+of a cluster of related cpus this property may contain multiple
+elements which associate the core with cost nodes describing the
+costs for the core itself, the cluster the core belongs to and so
+on. The elements must be ordered from the lowest level nodes to the
+highest desired level that EAS must service. The highest desired
+level must be the same for all cpu nodes. The elements must not be
+sparse: there must be elements for the current core, the next
+level of hierarchy (cluster) and so on without any 'holes'.
+
+If the system comprises hierarchical clusters of clusters, this
+property will contain multiple associations with the relevant number
+of cluster elements in hierarchical order.
+
+Property added to the cpu node:
+
+- sched-energy-costs
+
+       Usage: required
+       Value type: List of phandles
+       Definition: a list of phandles to specific cost nodes in the
+       energy-costs parent node that correspond to the processing
+       element represented by this cpu node in hierarchical order
+       of topology.
+
+       The order of phandles in the list is significant. The first
+       phandle is to the current processing element's own cost
+       node.  Subsequent phandles are to higher hierarchical level
+       cost nodes up until the maximum level that EAS is to
+       service.
+
+       All cpu nodes must have the same highest level cost node.
+
+       The phandle list must not be sparsely populated with handles
+       to non-contiguous hierarchical levels. See commentary above
+       for clarity.
+
+       Any other configuration is invalid.
+
+===========================================================
+7 - Example dts
+===========================================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters of cpus, one
+cluster of 2 Cortex-A57 cpus, one cluster of 4 Cortex-A53 cpus):
+
+cpus {
+       #address-cells = <2>;
+       #size-cells = <0>;
+       .
+       .
+       .
+       A57_0: cpu@0 {
+               compatible = "arm,cortex-a57","arm,armv8";
+               reg = <0x0 0x0>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A57_L2>;
+               clocks = <&scpi_dvfs 0>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+       };
+
+       A57_1: cpu@1 {
+               compatible = "arm,cortex-a57","arm,armv8";
+               reg = <0x0 0x1>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A57_L2>;
+               clocks = <&scpi_dvfs 0>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>;
+       };
+
+       A53_0: cpu@100 {
+               compatible = "arm,cortex-a53","arm,armv8";
+               reg = <0x0 0x100>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A53_L2>;
+               clocks = <&scpi_dvfs 1>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+       };
+
+       A53_1: cpu@101 {
+               compatible = "arm,cortex-a53","arm,armv8";
+               reg = <0x0 0x101>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A53_L2>;
+               clocks = <&scpi_dvfs 1>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+       };
+
+       A53_2: cpu@102 {
+               compatible = "arm,cortex-a53","arm,armv8";
+               reg = <0x0 0x102>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A53_L2>;
+               clocks = <&scpi_dvfs 1>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+       };
+
+       A53_3: cpu@103 {
+               compatible = "arm,cortex-a53","arm,armv8";
+               reg = <0x0 0x103>;
+               device_type = "cpu";
+               enable-method = "psci";
+               next-level-cache = <&A53_L2>;
+               clocks = <&scpi_dvfs 1>;
+               cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+               sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>;
+       };
+
+       energy-costs {
+               CPU_COST_0: core-cost0 {
+                       busy-cost-data = <
+                               417   168
+                               579   251
+                               744   359
+                               883   479
+                               1024  616
+                       >;
+                       idle-cost-data = <
+                               15
+                               0
+                       >;
+               };
+               CPU_COST_1: core-cost1 {
+                       busy-cost-data = <
+                               235 33
+                               302 46
+                               368 61
+                               406 76
+                               447 93
+                       >;
+                       idle-cost-data = <
+                               6
+                               0
+                       >;
+               };
+               CLUSTER_COST_0: cluster-cost0 {
+                       busy-cost-data = <
+                               417   24
+                               579   32
+                               744   43
+                               883   49
+                               1024  64
+                       >;
+                       idle-cost-data = <
+                               65
+                               24
+                       >;
+               };
+               CLUSTER_COST_1: cluster-cost1 {
+                       busy-cost-data = <
+                               235 26
+                               303 30
+                               368 39
+                               406 47
+                               447 57
+                       >;
+                       idle-cost-data = <
+                               56
+                               17
+                       >;
+               };
+       };
+};
+
+===============================================================================
+[1] https://lkml.org/lkml/2015/5/12/728
+[2] Documentation/devicetree/bindings/topology.txt
+[3] Documentation/scheduler/sched-energy.txt
index 6716413c17ba5988e4b8cbccace13784c22e935a..fea4777a569594d9b4ed7af58f82ddfa0b690dfb 100644 (file)
@@ -43,6 +43,7 @@ Table of Contents
   3.7   /proc/<pid>/task/<tid>/children - Information about task children
   3.8   /proc/<pid>/fdinfo/<fd> - Information about opened file
   3.9   /proc/<pid>/map_files - Information about memory mapped files
+  3.10  /proc/<pid>/timerslack_ns - Task timerslack value
 
   4    Configuring procfs
   4.1  Mount options
@@ -380,6 +381,8 @@ is not associated with a file:
  [stack]                  = the stack of the main process
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]            = an anonymous mapping that has been
+                            named by userspace
 
  or if empty, the mapping is anonymous.
 
@@ -432,6 +435,7 @@ KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 Locked:                0 kB
 VmFlags: rd ex mr mw me dw
+Name:           name from userspace
 
 the first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -494,6 +498,9 @@ Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, the flags may
 be vanished or the reverse -- new added.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
@@ -1847,6 +1854,23 @@ time one can open(2) mappings from the listings of two processes and
 comparing their inode numbers to figure out which anonymous memory areas
 are actually shared.
 
+3.10   /proc/<pid>/timerslack_ns - Task timerslack value
+---------------------------------------------------------
+This file provides the value of the task's timerslack in nanoseconds.
+This value specifies an amount of time that normal timers may be deferred
+in order to coalesce timers and avoid unnecessary wakeups.
+
+This allows a task's interactivity vs. power consumption trade-off to be
+adjusted.
+
+Writing 0 to the file will set the task's timerslack to the default value.
+
+Valid values are from 0 to ULLONG_MAX.
+
+An application setting the value must have PTRACE_MODE_ATTACH_FSCREDS level
+permissions on the task specified to change its timerslack_ns value.
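+
+For example, assuming a task with pid 1234 (the pid and values are purely
+illustrative), its timer slack could be inspected and relaxed with:
+
+  cat /proc/1234/timerslack_ns
+  echo 100000 > /proc/1234/timerslack_ns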
+
+
 ------------------------------------------------------------------------------
 Configuring procfs
 ------------------------------------------------------------------------------
index 5ee92cc9e57801cd2a61832f819dff88d784c67a..583935f88ee8247608af8d46c3af6ca16f3ad67f 100644 (file)
@@ -56,6 +56,7 @@ parameter is applicable:
        BLACKFIN Blackfin architecture is enabled.
        CLK     Common clock infrastructure is enabled.
        CMA     Contiguous Memory Area support is enabled.
+       DM      Device mapper support is enabled.
        DRM     Direct Rendering Management support is enabled.
        DYNAMIC_DEBUG Build in debug messages and enable them at runtime
        EDD     BIOS Enhanced Disk Drive Services (EDD) is enabled
@@ -915,6 +916,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        dis_ucode_ldr   [X86] Disable the microcode loader.
 
+       dm=             [DM] Allows early creation of a device-mapper device.
+                       See Documentation/device-mapper/boot.txt.
+
+       dmasound=       [HW,OSS] Sound subsystem buffers
+
        dma_debug=off   If the kernel is compiled with DMA_API_DEBUG support,
                        this option disables the debugging code at boot.
 
index 2ea4c45cf1c8736ccd577eff75058bb32ed0ca95..2042261408b9616712b56e0ed3db642b8221975f 100644 (file)
@@ -584,6 +584,16 @@ tcp_fastopen - INTEGER
 
        See include/net/tcp.h and the code for more details.
 
+tcp_fwmark_accept - BOOLEAN
+       If set, incoming connections to listening sockets that do not have a
+       socket mark will set the mark of the accepting socket to the fwmark of
+       the incoming SYN packet. This will cause all packets on that connection
+       (starting from the first SYNACK) to be sent with that fwmark. The
+       listening socket's mark is unchanged. Listening sockets that already
+       have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+       unaffected.
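+
+       For example, assuming the usual net.ipv4 naming for this sysctl, the
+       behaviour could be enabled at runtime with:
+
+               sysctl -w net.ipv4.tcp_fwmark_accept=1
+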
+       Default: 0
+
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
        will be retransmitted. Should not be higher than 255. Default value
index 5d8675615e59c40c6564710a0a9b73ae060e2a00..9264bcab4099a1e2198eb0acaf79c95ac4c62cb5 100644 (file)
@@ -45,7 +45,7 @@ corrupt, but usually it is restorable.
 
 2. Setting the parameters
 
-Setting the ramoops parameters can be done in 2 different manners:
+Setting the ramoops parameters can be done in 3 different manners:
  1. Use the module parameters (which have the names of the variables described
  as before).
  For quick debugging, you can also reserve parts of memory during boot
@@ -54,7 +54,9 @@ Setting the ramoops parameters can be done in 2 different manners:
  kernel to use only the first 128 MB of memory, and place ECC-protected ramoops
  region at 128 MB boundary:
  "mem=128M ramoops.mem_address=0x8000000 ramoops.ecc=1"
- 2. Use a platform device and set the platform data. The parameters can then
+ 2. Use Device Tree bindings, as described in
+ Documentation/devicetree/bindings/misc/ramoops.txt.
+ 3. Use a platform device and set the platform data. The parameters can then
  be set through that platform data. An example of doing that is:
 
 #include <linux/pstore_ram.h>
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644 (file)
index 0000000..dab2f90
--- /dev/null
@@ -0,0 +1,362 @@
+Energy cost model for energy-aware scheduling (EXPERIMENTAL)
+
+Introduction
+=============
+
+The basic energy model uses platform energy data stored in sched_group_energy
+data structures attached to the sched_groups in the sched_domain hierarchy. The
+energy cost model offers two functions that can be used to guide scheduling
+decisions:
+
+1.     static unsigned int sched_group_energy(struct energy_env *eenv)
+2.     static int energy_diff(struct energy_env *eenv)
+
+sched_group_energy() estimates the energy consumed by all cpus in a specific
+sched_group including any shared resources owned exclusively by this group of
+cpus. Resources shared with other cpus are excluded (e.g. later level caches).
+
+energy_diff() estimates the total energy impact of a utilization change. That
+is, adding, removing, or migrating utilization (tasks).
+
+Both functions use a struct energy_env to specify the scenario to be evaluated:
+
+       struct energy_env {
+               struct sched_group      *sg_top;
+               struct sched_group      *sg_cap;
+               int                     cap_idx;
+               int                     util_delta;
+               int                     src_cpu;
+               int                     dst_cpu;
+               int                     energy;
+       };
+
+sg_top: sched_group to be evaluated. Not used by energy_diff().
+
+sg_cap: sched_group covering the cpus in the same frequency domain. Set by
+sched_group_energy().
+
+cap_idx: Capacity state to be used for energy calculations. Set by
+find_new_capacity().
+
+util_delta: Amount of utilization to be added, removed, or migrated.
+
+src_cpu: Source cpu from where 'util_delta' utilization is removed. Should be
+-1 if no source (e.g. task wake-up).
+
+dst_cpu: Destination cpu where 'util_delta' utilization is added. Should be -1
+if utilization is removed (e.g. terminating tasks).
+
+energy: Result of sched_group_energy().
+
+The metric used to represent utilization is the actual per-entity running time
+averaged over time using a geometric series. Very similar to the existing
+per-entity load-tracking, but _not_ scaled by task priority and capped by the
+capacity of the cpu. The latter property does mean that utilization may
+underestimate the compute requirements for tasks on fully/over utilized cpus.
+The greatest potential for energy savings without affecting performance too
+much is in scenarios where the system isn't fully utilized. If the system is
+deemed fully utilized, load-balancing should be done with task load (which
+includes task priority) instead, in the interest of fairness and performance.
+
+
+Background and Terminology
+===========================
+
+To make it clear from the start:
+
+energy = [joule] (resource like a battery on powered devices)
+power = energy/time = [joule/second] = [watt]
+
+The goal of energy-aware scheduling is to minimize energy, while still getting
+the job done. That is, we want to maximize:
+
+       performance [inst/s]
+       --------------------
+           power [W]
+
+which is equivalent to minimizing:
+
+       energy [J]
+       -----------
+       instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance. Hence, there needs to be a user controllable knob to switch the
+objective. Since it is early days, this is currently a sched_feature
+(ENERGY_AWARE).
+
+The idea behind introducing an energy cost model is to allow the scheduler to
+evaluate the implications of its decisions rather than applying energy-saving
+techniques blindly that may only have positive effects on some platforms. At
+the same time, the energy cost model must be as simple as possible to minimize
+the scheduler latency impact.
+
+Platform topology
+------------------
+
+The system topology (cpus, caches, and NUMA information, not peripherals) is
+represented in the scheduler by the sched_domain hierarchy which has
+sched_groups attached at each level that covers one or more cpus (see
+sched-domains.txt for more details). To add energy awareness to the scheduler
+we need to consider power and frequency domains.
+
+Power domain:
+
+A power domain is a part of the system that can be powered on/off
+independently. Power domains are typically organized in a hierarchy where you
+may be able to power down just a cpu or a group of cpus along with any
+associated resources (e.g.  shared caches). Powering up a cpu means that all
+power domains it is a part of in the hierarchy must be powered up. Hence, it is
+more expensive to power up the first cpu that belongs to a higher level power
+domain than powering up additional cpus in the same high level domain. Two
+level power domain hierarchy example:
+
+               Power source
+                        +-------------------------------+----...
+per group PD            G                               G
+                        |           +----------+        |
+                   +--------+-------| Shared   |  (other groups)
+per-cpu PD         G        G       | resource |
+                   |        |       +----------+
+               +-------+ +-------+
+               | CPU 0 | | CPU 1 |
+               +-------+ +-------+
+
+Frequency domain:
+
+Frequency domains (P-states) typically cover the same group of cpus as one of
+the power domain levels. That is, there might be several smaller power domains
+sharing the same frequency (P-state) or there might be a power domain spanning
+multiple frequency domains.
+
+From a scheduling point of view there is no need to know the actual frequencies
+[Hz]. All the scheduler cares about is the compute capacity available at the
+current state (P-state) the cpu is in and any other available states. For that
+reason, and to also factor in any cpu micro-architecture differences, compute
+capacity scaling states are called 'capacity states' in this document. For SMP
+systems this is equivalent to P-states. For mixed micro-architecture systems
+(like ARM big.LITTLE) it is P-states scaled according to the micro-architecture
+performance relative to the other cpus in the system.
+
+Energy modelling:
+------------------
+
+Due to the hierarchical nature of the power domains, the most obvious way to
+model energy costs is to associate power and energy costs with
+domains (groups of cpus). Energy costs of shared resources are associated with
+the group of cpus that share the resources; only the cost of powering the
+cpu itself and any private resources (e.g. private L1 caches) is associated
+with the per-cpu groups (lowest level).
+
+For example, for an SMP system with per-cpu power domains and a cluster level
+(group of cpus) power domain we get the overall energy costs to be:
+
+       energy = energy_cluster + n * energy_cpu
+
+where 'n' is the number of cpus powered up and energy_cluster is the cost paid
+as soon as any cpu in the cluster is powered up.
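+
+For instance, with illustrative numbers energy_cluster = 10 and
+energy_cpu = 6, having three cpus powered up gives:
+
+       energy = 10 + 3 * 6 = 28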
+
+The power and frequency domains can naturally be mapped onto the existing
+sched_domain hierarchy and sched_groups by adding the necessary data to the
+existing data structures.
+
+The energy model considers energy consumption from two contributors (shown in
+the illustration below):
+
+1. Busy energy: Energy consumed while a cpu and the higher level groups that it
+belongs to are busy running tasks. Busy energy is associated with the state of
+the cpu, not an event. The time the cpu spends in this state varies. Thus, the
+most obvious platform parameter for this contribution is busy power
+(energy/time).
+
+2. Idle energy: Energy consumed while a cpu and higher level groups that it
+belongs to are idle (in a C-state). Like busy energy, idle energy is associated
+with the state of the cpu. Thus, the platform parameter for this contribution
+is idle power (energy/time).
+
+Energy consumed during transitions from an idle-state (C-state) to a busy state
+(P-state) or going the other way is ignored by the model to simplify the energy
+model calculations.
+
+
+       Power
+       ^
+       |            busy->idle             idle->busy
+       |            transition             transition
+       |
+       |                _                      __
+       |               / \                    /  \__________________
+       |______________/   \                  /
+       |                   \                /
+       |  Busy              \    Idle      /        Busy
+       |  low P-state        \____________/         high P-state
+       |
+       +------------------------------------------------------------> time
+
+Busy    |--------------|                          |-----------------|
+
+Wakeup                 |------|            |------|
+
+Idle                          |------------|
+
+
+The basic algorithm
+====================
+
+The basic idea is to determine the total energy impact when utilization is
+added or removed by estimating the impact at each level in the sched_domain
+hierarchy starting from the bottom (sched_group contains just a single cpu).
+The energy cost comes from busy time (sched_group is awake because one or more
+cpus are busy) and idle time (in an idle-state). Energy model numbers account
+for energy costs associated with all cpus in the sched_group as a group.
+
+       for_each_domain(cpu, sd) {
+               sg = sched_group_of(cpu)
+               energy_before = curr_util(sg) * busy_power(sg)
+                               + (1-curr_util(sg)) * idle_power(sg)
+               energy_after = new_util(sg) * busy_power(sg)
+                               + (1-new_util(sg)) * idle_power(sg)
+               energy_diff += energy_before - energy_after
+
+       }
+
+       return energy_diff
+
+{curr, new}_util: The cpu utilization at the lowest level and the overall
+non-idle time for the entire group for higher levels. Utilization is in the
+range 0.0 to 1.0 in the pseudo-code.
+
+busy_power: The power consumption of the sched_group.
+
+idle_power: The power consumption of the sched_group when idle.
+
+Note: It is a fundamental assumption that the utilization is (roughly) scale
+invariant. Task utilization tracking factors in any frequency scaling and
+performance scaling differences due to different cpu microarchitectures such
+that task utilization can be used across the entire system.
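+
+As a worked example for a single level (the numbers are purely illustrative),
+assume busy_power = 200, idle_power = 10, curr_util = 0.25 and
+new_util = 0.50 after adding a task:
+
+       energy_before = 0.25 * 200 + 0.75 * 10 = 57.5
+       energy_after  = 0.50 * 200 + 0.50 * 10 = 105.0
+       energy_diff  += 57.5 - 105.0 = -47.5
+
+i.e. the estimated energy consumption increases by 47.5 (bogo-)units when
+the utilization is added to this group.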
+
+
+Platform energy data
+=====================
+
+struct sched_group_energy can be attached to sched_groups in the sched_domain
+hierarchy and has the following members:
+
+cap_states:
+       List of struct capacity_state representing the supported capacity states
+       (P-states). struct capacity_state has two members: cap and power, which
+       represents the compute capacity and the busy_power of the state. The
+       list must be ordered by capacity low->high.
+
+nr_cap_states:
+       Number of capacity states in cap_states list.
+
+idle_states:
+       List of struct idle_state containing idle_state power cost for each
+       idle-state supported by the system, ordered by shallowest state first.
+       All states must be included at all levels in the hierarchy, i.e. a
+       sched_group spanning just a single cpu must also include coupled
+       idle-states (cluster states). In addition to the cpuidle idle-states,
+       the list must also contain an entry for idling using the arch
+       default idle (arch_idle_cpu()). Although this state may not be a true
+       hardware idle-state, it is considered the shallowest idle-state in the
+       energy model and must be the first entry. cpus may enter this state
+       (possibly 'active idling') if cpuidle decides not to enter a cpuidle
+       idle-state. Default idle may not be used when cpuidle is enabled.
+       In this case, it should just be a copy of the first cpuidle idle-state.
+
+nr_idle_states:
+       Number of idle states in idle_states list.
+
+There are no unit requirements for the energy cost data. Data can be normalized
+with any reference; however, the normalization must be consistent across all
+energy cost data. That is, one bogo-joule/watt must be the same quantity for
+all data, but we don't care what it is.
+
+A recipe for platform characterization
+=======================================
+
+Obtaining the actual model data for a particular platform requires some way of
+measuring power/energy. There isn't a tool to help with this (yet). This
+section provides a recipe for use as reference. It covers the steps used to
+characterize the ARM TC2 development platform. This sort of measurement is
+expected to be done anyway when tuning cpuidle and cpufreq for a given
+platform.
+
+The energy model needs two types of data (struct sched_group_energy holds
+these) for each sched_group where energy costs should be taken into account:
+
+1. Capacity state information
+
+A list containing the compute capacity and power consumption when fully
+utilized attributed to the group as a whole for each available capacity state.
+At the lowest level (group contains just a single cpu) this is the power of the
+cpu alone without including power consumed by resources shared with other cpus.
+It basically needs to fit the basic modelling approach described in "Background
+and Terminology" section:
+
+       energy_system = energy_shared + n * energy_cpu
+
+for a system containing 'n' busy cpus. Only 'energy_cpu' should be included at
+the lowest level. 'energy_shared' is included at the next level which
+represents the group of cpus among which the resources are shared.
+
+This model is, of course, a simplification of reality. Thus, power/energy
+attributions might not always exactly represent how the hardware is designed.
+Also, busy power is likely to depend on the workload. It is therefore
+recommended to use a representative mix of workloads when characterizing the
+capacity states.
+
+If the group has no capacity scaling support, the list will contain a single
+state where power is the busy power attributed to the group. The capacity
+should be set to a default value (1024).
+
+When frequency domains include multiple power domains, the group representing
+the frequency domain and all child groups share capacity states. This must be
+indicated by setting the SD_SHARE_CAP_STATES sched_domain flag. All groups at
+all levels that share the capacity state must have the list of capacity states
+with the power set to the contribution of the individual group.
+
+2. Idle power information
+
+Stored in the idle_states list. The power number is the group idle power
+consumption in each idle state, as well as when the group is idle but has not
+entered an idle-state ('active idle' as mentioned earlier). Due to the way the
+energy model is defined, the idle power of the deepest group idle state can
+alternatively be accounted for in the parent group busy power. In that case the
+group idle state power values are offset such that the idle power of the
+deepest state is zero. It is less intuitive, but it is easier to measure as
+idle power consumed by the group and the busy/idle power of the parent group
+cannot be distinguished without per group measurement points.
+
+Measuring capacity states and idle power:
+
+The capacity states' capacity and power can be estimated by running a benchmark
+workload at each available capacity state. By restricting the benchmark to run
+on subsets of cpus it is possible to extrapolate the power consumption of
+shared resources.
+
+ARM TC2 has two clusters of two and three cpus respectively. Each cluster has a
+shared L2 cache. TC2 has on-chip energy counters per cluster. Running a
+benchmark workload on just one cpu in a cluster means that power is consumed in
+the cluster (higher level group) and a single cpu (lowest level group). Adding
+another benchmark task to another cpu increases the power consumption by the
+amount consumed by the additional cpu. Hence, it is possible to extrapolate the
+cluster busy power.
+
+For platforms that don't have energy counters or equivalent instrumentation
+built-in, it may be possible to use an external DAQ to acquire similar data.
+
+If the benchmark includes some performance score (for example sysbench cpu
+benchmark), this can be used to record the compute capacity.
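+
+As a rough sketch (the commands, benchmark options and counter locations are
+platform specific and only illustrative), the cluster busy power can be
+extrapolated by reading the platform energy counters around runs such as:
+
+       taskset -c 0 sysbench --test=cpu run
+       taskset -c 0,1 sysbench --test=cpu run
+
+The difference between the two runs approximates the additional per-cpu busy
+energy, while the remainder of the single-cpu run is attributed to the
+cluster level.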
+
+Measuring idle power requires insight into the idle state implementation on the
+particular platform, specifically whether the platform has coupled idle-states
+(or package states). To measure non-coupled per-cpu idle-states it is necessary to
+keep one cpu busy to keep any shared resources alive to isolate the idle power
+of the cpu from idle/busy power of the shared resources. The cpu can be tricked
+into different per-cpu idle states by disabling the other states. Based on
+various combinations of measurements with specific cpus busy and disabling
+idle-states it is possible to extrapolate the idle-state power.
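+
+As an illustrative sketch, individual per-cpu idle-states can be disabled
+through the standard cpuidle sysfs interface while another cpu is kept busy,
+e.g.:
+
+       echo 1 > /sys/devices/system/cpu/cpu1/cpuidle/state2/disable
+
+and the resulting idle power measured for each such combination of enabled
+states and busy cpus.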
diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt
new file mode 100644 (file)
index 0000000..9bd2231
--- /dev/null
@@ -0,0 +1,366 @@
+             Central, scheduler-driven, power-performance control
+                               (EXPERIMENTAL)
+
+Abstract
+========
+
+The topic of a single simple power-performance tunable, that is wholly
+scheduler centric, and has well defined and predictable properties has come up
+on several occasions in the past [1,2]. With techniques such as a scheduler
+driven DVFS [3], we now have a good framework for implementing such a tunable.
+This document describes the overall ideas behind its design and implementation.
+
+
+Table of Contents
+=================
+
+1. Motivation
+2. Introduction
+3. Signal Boosting Strategy
+4. OPP selection using boosted CPU utilization
+5. Per task group boosting
+6. Question and Answers
+   - What about "auto" mode?
+   - What about boosting on a congested system?
+   - How are CPUs boosted when we have tasks with multiple boost values?
+7. References
+
+
+1. Motivation
+=============
+
+Sched-DVFS [3] is a new event-driven cpufreq governor which allows the
+scheduler to select the optimal DVFS operating point (OPP) for running a task
+allocated to a CPU. The introduction of sched-DVFS enables running workloads at
+the most energy efficient OPPs.
+
+However, sometimes it may be desired to intentionally boost the performance of
+a workload even if that could imply a reasonable increase in energy
+consumption. For example, in order to reduce the response time of a task, we
+may want to run the task at a higher OPP than the one that is actually required
+by its CPU bandwidth demand.
+
+This last requirement is especially important if we consider that one of the
+main goals of the sched-DVFS component is to replace all currently available
+CPUFreq policies. Since sched-DVFS is event based, as opposed to the sampling
+driven governors we currently have, it is already more responsive at selecting
+the optimal OPP to run tasks allocated to a CPU. However, just tracking the
+actual task load demand may not be enough from a performance standpoint.  For
+example, it is not possible to get behaviors similar to those provided by the
+"performance" and "interactive" CPUFreq governors.
+
+This document describes an implementation of a tunable, stacked on top of the
+sched-DVFS which extends its functionality to support task performance
+boosting.
+
+By "performance boosting" we mean the reduction of the time required to
+complete a task activation, i.e. the time elapsed from a task wakeup to its
+next deactivation (e.g. because it goes back to sleep or it terminates).  For
+example, if we consider a simple periodic task which executes the same workload
+for 5[s] every 20[s] while running at a certain OPP, a boosted execution of
+that task must complete each of its activations in less than 5[s].
+
+A previous attempt [5] to introduce such a boosting feature has not been
+successful mainly because of the complexity of the proposed solution.  The
+approach described in this document exposes a single simple interface to
+user-space.  This single tunable knob allows the tuning of system wide
+scheduler behaviours ranging from energy efficiency at one end through to
+incremental performance boosting at the other end.  This first tunable affects
+all tasks. However, a more advanced extension of the concept is also provided
+which uses CGroups to boost the performance of only selected tasks while using
+the energy efficient default for all others.
+
+The rest of this document introduces in more detail the proposed solution
+which has been named SchedTune.
+
+
+2. Introduction
+===============
+
+SchedTune exposes a simple user-space interface with a single power-performance
+tunable:
+
+  /proc/sys/kernel/sched_cfs_boost
+
+This permits expressing a boost value as an integer in the range [0..100].
+
+A value of 0 (default) configures the CFS scheduler for maximum energy
+efficiency. This means that sched-DVFS runs the tasks at the minimum OPP
+required to satisfy their workload demand.
+A value of 100 configures the scheduler for maximum performance, which translates
+to the selection of the maximum OPP on that CPU.
+
+The range between 0 and 100 can be set to satisfy other scenarios suitably. For
+example, to satisfy interactive response requirements or to react to other
+system events (battery level, etc.).
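+
+For example, a moderate system-wide 10% boost could be requested with:
+
+  echo 10 > /proc/sys/kernel/sched_cfs_boost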
+
+A CGroup based extension is also provided, which permits further user-space
+defined task classification to tune the scheduler for different goals depending
+on the specific nature of the task, e.g. background vs interactive vs
+low-priority.
+
+The overall design of the SchedTune module is built on top of "Per-Entity Load
+Tracking" (PELT) signals and sched-DVFS by introducing a bias on the Operating
+Performance Point (OPP) selection.
+Each time a task is allocated on a CPU, sched-DVFS has the opportunity to tune
+the operating frequency of that CPU to better match the workload demand. The
+selection of the actual OPP being activated is influenced by the global boost
+value, or the boost value for the task CGroup when in use.
+
+This simple biasing approach leverages existing frameworks, which means minimal
+modifications to the scheduler, and yet it allows a range of different
+behaviours to be achieved, all from a single simple tunable knob.
+The only new concept introduced is that of signal boosting.
+
+
+3. Signal Boosting Strategy
+===========================
+
+The whole PELT machinery works based on the value of a few load tracking signals
+which basically track the CPU bandwidth requirements for tasks and the capacity
+of CPUs. The basic idea behind the SchedTune knob is to artificially inflate
+some of these load tracking signals to make a task or RQ appear more demanding
+than it actually is.
+
+Which signals have to be inflated depends on the specific "consumer".  However,
+independently from the specific (signal, consumer) pair, it is important to
+define a simple and possibly consistent strategy for the concept of boosting a
+signal.
+
+A boosting strategy defines how the "abstract" user-space defined
+sched_cfs_boost value is translated into an internal "margin" value to be added
+to a signal to get its inflated value:
+
+  margin         := boosting_strategy(sched_cfs_boost, signal)
+  boosted_signal := signal + margin
+
+Different boosting strategies were identified and analyzed before selecting the
+one found to be most effective.
+
+Signal Proportional Compensation (SPC)
+--------------------------------------
+
+In this boosting strategy the sched_cfs_boost value is used to compute a
+margin which is proportional to the complement of the original signal.
+When a signal has a maximum possible value, its complement is defined as
+the delta between the actual value and its possible maximum.
+
+Since the tunable implementation uses signals which have SCHED_LOAD_SCALE as
+the maximum possible value, the margin becomes:
+
+       margin := sched_cfs_boost * (SCHED_LOAD_SCALE - signal)
+
+Using this boosting strategy:
+- a 100% sched_cfs_boost means that the signal is scaled to the maximum value
+- each value in the range of sched_cfs_boost effectively inflates the signal in
+  question by a quantity which is proportional to the signal's headroom with
+  respect to the maximum value.
+
+For example, by applying the SPC boosting strategy to the selection of the OPP
+to run a task it is possible to achieve these behaviors:
+
+-   0% boosting: run the task at the minimum OPP required by its workload
+- 100% boosting: run the task at the maximum OPP available for the CPU
+-  50% boosting: run at the half-way OPP between minimum and maximum
+
+This means that, at 50% boosting, a task will be scheduled to run at half of
+the maximum theoretically achievable performance on the specific target
+platform.
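+
+As a worked numeric example (the values are illustrative), with
+SCHED_LOAD_SCALE = 1024, an original signal of 400 and a 50% boost:
+
+       margin         := 0.5 * (1024 - 400) = 312
+       boosted_signal := 400 + 312          = 712
+
+i.e. the boosted signal ends up halfway between the original signal and its
+maximum possible value.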
+
+A graphical representation of an SPC boosted signal is represented in the
+following figure where:
+ a) "-" represents the original signal
+ b) "b" represents a  50% boosted signal
+ c) "p" represents a 100% boosted signal
+
+
+   ^
+   |  SCHED_LOAD_SCALE
+   +-----------------------------------------------------------------+
+   |pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp
+   |
+   |                                             boosted_signal
+   |                                          bbbbbbbbbbbbbbbbbbbbbbbb
+   |
+   |                                            original signal
+   |                  bbbbbbbbbbbbbbbbbbbbbbbb+----------------------+
+   |                                          |
+   |bbbbbbbbbbbbbbbbbb                        |
+   |                                          |
+   |                                          |
+   |                                          |
+   |                  +-----------------------+
+   |                  |
+   |                  |
+   |                  |
+   |------------------+
+   |
+   |
+   +----------------------------------------------------------------------->
+
+The plot above shows a ramped load signal (titled 'original signal') and its
+boosted equivalent. For each step of the original signal the boosted signal
+corresponding to a 50% boost is midway from the original signal and the upper
+bound. Boosting by 100% generates a boosted signal which is always saturated to
+the upper bound.
+
+
+4. OPP selection using boosted CPU utilization
+==============================================
+
+It is worth calling out that the implementation does not introduce any new load
+signals. Instead, it provides an API to tune existing signals. This tuning is
+done on demand and only in scheduler code paths where it is sensible to do so.
+The new API calls are defined to return either the default signal or a boosted
+one, depending on the value of sched_cfs_boost. This is a clean and
+non-invasive modification of the existing code paths.
+
+The signal representing a CPU's utilization is boosted according to the
+previously described SPC boosting strategy. To sched-DVFS, this allows a CPU
+(i.e. CFS run-queue) to appear more used than it actually is.
+
+Thus, with the sched_cfs_boost enabled we have the following main functions to
+get the current utilization of a CPU:
+
+  cpu_util()
+  boosted_cpu_util()
+
+The new boosted_cpu_util() is similar to cpu_util() but returns a boosted
+utilization signal which is a function of the sched_cfs_boost value.
+
+This function is used in the CFS scheduler code paths where sched-DVFS needs to
+decide the OPP to run a CPU at.
+For example, this allows selecting the highest OPP for a CPU which has
+the boost value set to 100%.
+
+
+5. Per task group boosting
+==========================
+
+The availability of a single knob which is used to boost all tasks in the
+system is certainly a simple solution but it quite likely doesn't fit many
+utilization scenarios, especially in the mobile device space.
+
+For example, on battery powered devices there usually are many background
+services which are long running and need energy efficient scheduling. On the
+other hand, some applications are more performance sensitive and require an
+interactive response and/or maximum performance, regardless of the energy cost.
+To better service such scenarios, the SchedTune implementation has an extension
+that provides a more fine grained boosting interface.
+
+A new CGroup controller, namely "schedtune", can be enabled, which allows task
+groups with different boost values to be defined and configured.
+Tasks that require special performance can be put into separate CGroups.
+The value of the boost associated with the tasks in this group can be specified
+using a single knob exposed by the CGroup controller:
+
+   schedtune.boost
+
+This knob allows the definition of a boost value that is to be used for
+SPC boosting of all tasks attached to this group.
+
+The current schedtune controller implementation is really simple and has these
+main characteristics:
+
+  1) It is only possible to create 1 level depth hierarchies
+
+     The root control group defines the system-wide boost value to be applied
+     by default to all tasks. Its direct subgroups are named "boost groups" and
+     they define the boost value for specific set of tasks.
+     Further nested subgroups are not allowed since they do not have a sensible
+     meaning from a user-space standpoint.
+
+  2) It is possible to define only a limited number of "boost groups"
+
+     This number is defined at compile time and by default configured to 16.
+     This is a design decision motivated by two main reasons:
+     a) In a real system we do not expect utilization scenarios with more than a
+       few boost groups. For example, a reasonable collection of groups could be
+        just "background", "interactive" and "performance".
+     b) It simplifies the implementation considerably, especially for the code
+       which has to compute the per CPU boosting once there are multiple
+        RUNNABLE tasks with different boost values.
+
+Such a simple design should allow servicing the main utilization scenarios identified
+so far. It provides a simple interface which can be used to manage the
+power-performance of all tasks or only selected tasks.
+Moreover, this interface can be easily integrated by user-space run-times (e.g.
+Android, ChromeOS) to implement a QoS solution for task boosting based on tasks
+classification, which has been a long standing requirement.
+
+Setup and usage
+---------------
+
+0. Use a kernel with CGROUP_SCHEDTUNE support enabled
+
+1. Check that the "schedtune" CGroup controller is available:
+
+   root@linaro-nano:~# cat /proc/cgroups
+   #subsys_name        hierarchy       num_cgroups     enabled
+   cpuset      0               1               1
+   cpu         0               1               1
+   schedtune   0               1               1
+
+2. Mount a tmpfs to create the CGroups mount point (Optional)
+
+   root@linaro-nano:~# sudo mount -t tmpfs cgroups /sys/fs/cgroup
+
+3. Mount the "schedtune" controller
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune
+   root@linaro-nano:~# sudo mount -t cgroup -o schedtune stune /sys/fs/cgroup/stune
+
+4. Setup the system-wide boost value (Optional)
+
+   If not configured the root control group has a 0% boost value, which
+   basically disables boosting for all tasks in the system thus running in
+   an energy-efficient mode.
+
+   root@linaro-nano:~# echo $SYSBOOST > /sys/fs/cgroup/stune/schedtune.boost
+
+5. Create task groups and configure their specific boost value (Optional)
+
+   For example, here we create a "performance" boost group configured to boost
+   all its tasks to 100%.
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune/performance
+   root@linaro-nano:~# echo 100 > /sys/fs/cgroup/stune/performance/schedtune.boost
+
+6. Move tasks into the boost group
+
+   For example, the following moves the task with PID $TASKPID (and all its
+   threads) into the "performance" boost group.
+
+   root@linaro-nano:~# echo $TASKPID > /sys/fs/cgroup/stune/performance/cgroup.procs
+
+This simple configuration allows only the threads of the $TASKPID task to run,
+when needed, at the highest OPP in the most capable CPU of the system.
+
+
+6. Question and Answers
+=======================
+
+What about "auto" mode?
+-----------------------
+
+The 'auto' mode as described in [5] can be implemented by interfacing SchedTune
+with some suitable user-space element. This element could use the exposed
+system-wide or cgroup based interface.
+
+How are multiple groups of tasks with different boost values managed?
+---------------------------------------------------------------------
+
+The current SchedTune implementation keeps track of the boosted RUNNABLE tasks
+on a CPU. Once sched-DVFS selects the OPP to run a CPU at, the CPU utilization
+is boosted with a value which is the maximum of the boost values of the
+currently RUNNABLE tasks in its RQ.
+
+This allows sched-DVFS to boost a CPU only while there are boosted tasks ready
+to run and switch back to the energy efficient mode as soon as the last boosted
+task is dequeued.
+
+
+7. References
+=============
+[1] http://lwn.net/Articles/552889
+[2] http://lkml.org/lkml/2012/5/18/91
+[3] http://lkml.org/lkml/2015/6/26/620
diff --git a/Documentation/sync.txt b/Documentation/sync.txt
new file mode 100644 (file)
index 0000000..a2d05e7
--- /dev/null
@@ -0,0 +1,75 @@
+Motivation:
+
+In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display)
+a consumer of a buffer needs to know when the producer has finished producing
+it.  Likewise the producer needs to know when the consumer is finished with the
+buffer so it can reuse it.  A particular buffer may be consumed by multiple
+consumers which will retain the buffer for different amounts of time.  In
+addition, a consumer may consume multiple buffers atomically.
+The sync framework adds an API which allows synchronization between the
+producers and consumers in a generic way while also allowing platforms which
+have shared hardware synchronization primitives to exploit them.
+
+Goals:
+       * provide a generic API for expressing synchronization dependencies
+       * allow drivers to exploit hardware synchronization between hardware
+         blocks
+       * provide a userspace API that allows a compositor to manage
+         dependencies.
+       * provide rich telemetry data to allow debugging slowdowns and stalls of
+          the graphics pipeline.
+
+Objects:
+       * sync_timeline
+       * sync_pt
+       * sync_fence
+
+sync_timeline:
+
+A sync_timeline is an abstract monotonically increasing counter. In general,
+each driver/hardware block context will have one of these.  They can be backed
+by the appropriate hardware or rely on the generic sw_sync implementation.
+Timelines are only ever created through their specific implementations
+(i.e. sw_sync.)
+
+sync_pt:
+
+A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
+have a single timeline parent.  They have 3 states: active, signaled, and error.
+They start in active state and transition, once, to either signaled (when the
+timeline counter advances beyond the sync_pt’s value) or error state.
+
+sync_fence:
+
+Sync_fences are the primary primitives used by drivers to coordinate
+synchronization of their buffers.  They are a collection of sync_pts which may
+or may not have the same timeline parent.  A sync_pt can only exist in one fence
+and the fence's list of sync_pts is immutable once created.  Fences can be
+waited on synchronously or asynchronously.  Two fences can also be merged to
+create a third fence containing a copy of the two fences’ sync_pts.  Fences are
+backed by file descriptors to allow userspace to coordinate the display pipeline
+dependencies.
+
+Use:
+
+A driver implementing sync support should have a work submission function which:
+     * takes a fence argument specifying when to begin work
+     * asynchronously queues that work to kick off when the fence is signaled
+     * returns a fence to indicate when its work will be done.
+     * signals the returned fence once the work is completed.
+
+Consider an imaginary display driver that has the following API:
+/*
+ * assumes buf is ready to be displayed.
+ * blocks until the buffer is on screen.
+ */
+    void display_buffer(struct dma_buf *buf);
+
+The new API will become:
+/*
+ * will display buf when fence is signaled.
+ * returns immediately with a fence that will signal when buf
+ * is no longer displayed.
+ */
+struct sync_fence* display_buffer(struct dma_buf *buf,
+                                 struct sync_fence *fence);
index af70d1541d3af5b18834bce320ddd3e37009d29b..5728779df1aba2aef04c17d3eb3f89a0b7be555a 100644 (file)
@@ -58,6 +58,8 @@ show up in /proc/sys/kernel:
 - panic_on_stackoverflow
 - panic_on_unrecovered_nmi
 - panic_on_warn
+- perf_cpu_time_max_percent
+- perf_event_paranoid
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -624,6 +626,19 @@ allowed to execute.
 
 ==============================================================
 
+perf_event_paranoid:
+
+Controls use of the performance events system by unprivileged
+users (without CAP_SYS_ADMIN).  The default value is 3 if
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 1 otherwise.
+
+ -1: Allow use of (almost) all events by all users
+>=0: Disallow raw tracepoint access by users without CAP_SYS_ADMIN
+>=1: Disallow CPU event access by users without CAP_SYS_ADMIN
+>=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+>=3: Disallow all event access by users without CAP_SYS_ADMIN
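+
+For example, kernel profiling could be re-allowed for unprivileged users at
+runtime (as root) with:
+
+  echo 1 > /proc/sys/kernel/perf_event_paranoid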
+
+==============================================================
 
 pid_max:
 
index f72370b440b121e718183ea6a7dbb9bafc2dcc25..c0397afe6a1d457d97f8d7493c43cf8fc0b6b83a 100644 (file)
@@ -30,6 +30,7 @@ Currently, these files are in /proc/sys/vm:
 - dirty_writeback_centisecs
 - drop_caches
 - extfrag_threshold
+- extra_free_kbytes
 - hugepages_treat_as_movable
 - hugetlb_shm_group
 - laptop_mode
@@ -42,6 +43,8 @@ Currently, these files are in /proc/sys/vm:
 - min_slab_ratio
 - min_unmapped_ratio
 - mmap_min_addr
+- mmap_rnd_bits
+- mmap_rnd_compat_bits
 - nr_hugepages
 - nr_overcommit_hugepages
 - nr_trim_pages         (only if CONFIG_MMU=n)
@@ -236,6 +239,21 @@ fragmentation index is <= extfrag_threshold. The default value is 500.
 
 ==============================================================
 
+extra_free_kbytes
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low latency memory allocations
+and have a bounded burstiness in memory allocations, for example a
+realtime application that receives and transmits network traffic
+(causing in-kernel memory allocations) with a maximum total message burst
+size of 200MB may need 200MB of extra free memory to avoid direct reclaim
+related latencies.
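+
+For example (the figure is illustrative), the 200MB burst case above could
+reserve the corresponding amount of extra free memory with:
+
+  echo 204800 > /proc/sys/vm/extra_free_kbytes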
+
+==============================================================
+
 hugepages_treat_as_movable
 
 This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
@@ -485,6 +503,33 @@ against future potential kernel bugs.
 
 ==============================================================
 
+mmap_rnd_bits:
+
+This value can be used to select the number of bits to use to
+determine the random offset to the base address of vma regions
+resulting from mmap allocations on architectures which support
+tuning address space randomization.  This value will be bounded
+by the architecture's minimum and maximum supported values.
+
+This value can be changed after boot using the
+/proc/sys/vm/mmap_rnd_bits tunable
+
+==============================================================
+
+mmap_rnd_compat_bits:
+
+This value can be used to select the number of bits to use to
+determine the random offset to the base address of vma regions
+resulting from mmap allocations for applications run in
+compatibility mode on architectures which support tuning address
+space randomization.  This value will be bounded by the
+architecture's minimum and maximum supported values.
+
+This value can be changed after boot using the
+/proc/sys/vm/mmap_rnd_compat_bits tunable
+
+==============================================================
+
 nr_hugepages
 
 Change the minimum size of the hugepage pool.
index 21d514ced212436ea70aa5a218e20a5f8407523e..4d817d5acc4090a7001b7ca4ae505d7884d1e3a3 100644 (file)
@@ -25,6 +25,7 @@ cpufreq.
 
 cpu_idle               "state=%lu cpu_id=%lu"
 cpu_frequency          "state=%lu cpu_id=%lu"
+cpu_frequency_limits   "min=%lu max=%lu cpu_id=%lu"
 
 A suspend event is used to indicate the system going in and out of the
 suspend mode:
index f52f297cb40627a7d5855f04399977f304272e51..fa16fb2302a58c1dcdf127a7a4a71f2afb739cc5 100644 (file)
@@ -357,6 +357,26 @@ of ftrace. Here is a list of some of the key files:
                  to correlate events across hypervisor/guest if
                  tb_offset is known.
 
+         mono: This uses the fast monotonic clock (CLOCK_MONOTONIC)
+               which is monotonic and is subject to NTP rate adjustments.
+
+         mono_raw:
+               This is the raw monotonic clock (CLOCK_MONOTONIC_RAW)
+               which is monotonic but is not subject to any rate adjustments
+               and ticks at the same rate as the hardware clocksource.
+
+         boot: This is the boot clock (CLOCK_BOOTTIME) and is based on the
+               fast monotonic clock, but also accounts for time spent in
+               suspend. Since the clock access is designed for use in
+               tracing in the suspend path, some side effects are possible
+               if the clock is accessed after the suspend time is accounted but
+               before the fast mono clock is updated. In this case, the clock update
+               appears to happen slightly sooner than it normally would have.
+               Also on 32-bit systems, it's possible that the 64-bit boot offset
+               sees a partial update. These effects are rare and post
+               processing should be able to handle them. See comments on
+               ktime_get_boot_fast_ns function for more information.
+
        To set a clock, simply echo the clock name into this file.
 
          echo global > trace_clock
@@ -2088,6 +2108,35 @@ will produce:
  1)   1.449 us    |             }
 
 
+You can disable the hierarchical function call formatting and instead print a
+flat list of function entry and return events.  This uses the format described
+in the Output Formatting section and respects all the trace options that
+control that formatting.  Hierarchical formatting is the default.
+
+       hierarchical: echo nofuncgraph-flat > trace_options
+       flat: echo funcgraph-flat > trace_options
+
+  ie:
+
+  # tracer: function_graph
+  #
+  # entries-in-buffer/entries-written: 68355/68355   #P:2
+  #
+  #                              _-----=> irqs-off
+  #                             / _----=> need-resched
+  #                            | / _---=> hardirq/softirq
+  #                            || / _--=> preempt-depth
+  #                            ||| /     delay
+  #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+  #              | |       |   ||||       |         |
+                sh-1806  [001] d...   198.843443: graph_ent: func=_raw_spin_lock
+                sh-1806  [001] d...   198.843445: graph_ent: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843447: graph_ret: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843449: graph_ret: func=_raw_spin_lock
+                sh-1806  [001] d..1   198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
+                sh-1806  [001] d...   198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
+
+
 You might find other useful features for this tracer in the
 following "dynamic ftrace" section such as tracing only specific
 functions or tasks.
diff --git a/android/configs/README b/android/configs/README
new file mode 100644 (file)
index 0000000..8798731
--- /dev/null
@@ -0,0 +1,15 @@
+The files in this directory are meant to be used as a base for an Android
+kernel config. All devices should have the options in android-base.cfg enabled.
+While not mandatory, the options in android-recommended.cfg enable advanced
+Android features.
+
+Assuming you already have a minimalist defconfig for your device, a possible
+way to enable these options would be:
+
+     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
+
+This will generate a .config that can then be used to save a new defconfig or
+compile a new kernel with Android features enabled.
+
+Because there is no tool to consistently generate these config fragments,
+let's keep them alphabetically sorted instead of random.
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
new file mode 100644 (file)
index 0000000..8531a7a
--- /dev/null
@@ -0,0 +1,168 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_DEVKMEM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_MODULES is not set
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_SYSVIPC is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_ASHMEM=y
+CONFIG_AUDIT=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_EMBEDDED=y
+CONFIG_FB=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_INET=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_NAT=y
+CONFIG_NO_HZ=y
+CONFIG_PACKET=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PPP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PREEMPT=y
+CONFIG_PROFILING=y
+CONFIG_QUOTA=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECCOMP=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_STAGING=y
+CONFIG_SWP_EMULATION=y
+CONFIG_SYNC=y
+CONFIG_TUN=y
+CONFIG_UID_CPUTIME=y
+CONFIG_UNIX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_XFRM_USER=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
new file mode 100644 (file)
index 0000000..3fd0b13
--- /dev/null
@@ -0,0 +1,128 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_AIO is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_PM_WAKELOCKS_GC is not set
+# CONFIG_VT is not set
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ARM_KERNMEM_PERMS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_COMPACTION=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_DM_UEVENT=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_ION=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KSM=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGITECH_FF=y
+CONFIG_MD=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_POWER_SUPPLY=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TIMER_STATS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UHID=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_USBNET=y
+CONFIG_VFAT_FS=y
index d4d9845530f1df6cb70117a1eca282d31fb4acbb..98f64ad1caf1817e5535a223f84412422c20c1ed 100644 (file)
@@ -527,6 +527,74 @@ config HAVE_COPY_THREAD_TLS
          normal C parameter passing, rather than extracting the syscall
          argument from pt_regs.
 
+config HAVE_ARCH_MMAP_RND_BITS
+       bool
+       help
+         An arch should select this symbol if it supports setting a variable
+         number of bits for use in establishing the base address for mmap
+         allocations, has MMU enabled and provides values for both:
+         - ARCH_MMAP_RND_BITS_MIN
+         - ARCH_MMAP_RND_BITS_MAX
+
+config ARCH_MMAP_RND_BITS_MIN
+       int
+
+config ARCH_MMAP_RND_BITS_MAX
+       int
+
+config ARCH_MMAP_RND_BITS_DEFAULT
+       int
+
+config ARCH_MMAP_RND_BITS
+       int "Number of bits to use for ASLR of mmap base address" if EXPERT
+       range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
+       default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
+       default ARCH_MMAP_RND_BITS_MIN
+       depends on HAVE_ARCH_MMAP_RND_BITS
+       help
+         This value can be used to select the number of bits to use to
+         determine the random offset to the base address of vma regions
+         resulting from mmap allocations. This value will be bounded
+         by the architecture's minimum and maximum supported values.
+
+         This value can be changed after boot using the
+         /proc/sys/vm/mmap_rnd_bits tunable
+
+config HAVE_ARCH_MMAP_RND_COMPAT_BITS
+       bool
+       help
+         An arch should select this symbol if it supports running applications
+         in compatibility mode, supports setting a variable number of bits for
+         use in establishing the base address for mmap allocations, has MMU
+         enabled and provides values for both:
+         - ARCH_MMAP_RND_COMPAT_BITS_MIN
+         - ARCH_MMAP_RND_COMPAT_BITS_MAX
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       int
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       int
+
+config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+       int
+
+config ARCH_MMAP_RND_COMPAT_BITS
+       int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
+       range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
+       default ARCH_MMAP_RND_COMPAT_BITS_MIN
+       depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
+       help
+         This value can be used to select the number of bits to use to
+         determine the random offset to the base address of vma regions
+         resulting from mmap allocations for compatible applications. This
+         value will be bounded by the architecture's minimum and maximum
+         supported values.
+
+         This value can be changed after boot using the
+         /proc/sys/vm/mmap_rnd_compat_bits tunable
+
 #
 # ABI hall of shame
 #
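Note on the two ASLR knobs introduced above: they surface in user space as /proc/sys/vm/mmap_rnd_bits and /proc/sys/vm/mmap_rnd_compat_bits, as the help texts state. A minimal standalone C sketch (not part of this patch; it assumes 4 KiB pages) that reads the tunable and reports the resulting randomisation window:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/mmap_rnd_bits", "r");
	unsigned long bits;
	const unsigned long page_size = 4096;	/* assumption: 4 KiB pages */

	if (!f) {
		perror("mmap_rnd_bits");
		return 1;
	}
	if (fscanf(f, "%lu", &bits) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* Each additional bit doubles the window the mmap base can land in. */
	printf("mmap base randomised over %lu bytes (%lu bits)\n",
	       (1UL << bits) * page_size, bits);
	return 0;
}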
index 9049ac023bee94dd6458b3bf2a4bebcbe9d5f593..625765fb805adbbad99327017d16e894b8265096 100644 (file)
@@ -35,6 +35,7 @@ config ARM
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+       select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
@@ -310,6 +311,14 @@ config MMU
          Select if you want MMU-based virtualised addressing space
          support by paged memory management. If unsure, say 'Y'.
 
+config ARCH_MMAP_RND_BITS_MIN
+       default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+       default 14 if PAGE_OFFSET=0x40000000
+       default 15 if PAGE_OFFSET=0x80000000
+       default 16
+
 #
 # The "ARM system type" choice list is ordered alphabetically by option
 # text.  Please add new entries in the option alphabetic order.
@@ -1817,6 +1826,15 @@ config XEN
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+       bool "Force flush the console on restart"
+       help
+         If the console is locked while the system is rebooted, the messages
+         in the temporary logbuffer would not have propagated to all the
+         console drivers. This option forces the console lock to be
+         released if it failed to be acquired, which will cause all the
+         pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
@@ -1845,6 +1863,21 @@ config DEPRECATED_PARAM_STRUCT
          This was deprecated in 2001 and announced to live on for 5 years.
          Some old boot loaders still use this way.
 
+config BUILD_ARM_APPENDED_DTB_IMAGE
+       bool "Build a concatenated zImage/dtb by default"
+       depends on OF
+       help
+         Enabling this option will cause a concatenated zImage and list of
+         DTBs to be built by default (instead of a standalone zImage.)
+         The image will be built in arch/arm/boot/zImage-dtb
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+       string "Default dtb names"
+       depends on BUILD_ARM_APPENDED_DTB_IMAGE
+       help
+         Space separated list of names of dtbs to append when
+         building a concatenated zImage-dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
 config ZBOOT_ROM_TEXT
index ddbb361267d879c3c2947c9b094d7be784e2f1c2..f08e4fd600a3cdc24d8e18f1296833681ffe2a9d 100644 (file)
@@ -1618,6 +1618,14 @@ config EARLY_PRINTK
          kernel low-level debugging functions. Add earlyprintk to your
          kernel parameters to enable this console.
 
+config EARLY_PRINTK_DIRECT
+       bool "Early printk direct"
+       depends on DEBUG_LL
+       help
+         Say Y here if you want to have an early console using the
+         kernel low-level debugging functions and EARLY_PRINTK is
+         not early enough.
+
 config ARM_KPROBES_TEST
        tristate "Kprobes test module"
        depends on KPROBES && MODULES
index 2c2b28ee48119771dfa92f353124795d456d770a..88e479c8bcefefa594d35bd9f47c91fa1c914b93 100644 (file)
@@ -296,6 +296,8 @@ libs-y                              := arch/arm/lib/ $(libs-y)
 # Default target when executing plain make
 ifeq ($(CONFIG_XIP_KERNEL),y)
 KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb
 else
 KBUILD_IMAGE := zImage
 endif
@@ -346,6 +348,9 @@ ifeq ($(CONFIG_VDSO),y)
        $(Q)$(MAKE) $(build)=arch/arm/vdso $@
 endif
 
+zImage-dtb: vmlinux scripts dtbs
+       $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
index 3c79f85975aaa26c7c2e353fefc54d71d89bc5bf..ad7a0253ea961a405fb13bb284bd1c5942e28675 100644 (file)
@@ -4,3 +4,4 @@ xipImage
 bootpImage
 uImage
 *.dtb
+zImage-dtb
\ No newline at end of file
index 9eca7aee927f3081975d7372a66a358565c33c56..4a04ed3daf97891cce6d9e24faf4f60634e34e72 100644 (file)
@@ -14,6 +14,7 @@
 ifneq ($(MACHINE),)
 include $(MACHINE)/Makefile.boot
 endif
+include $(srctree)/arch/arm/boot/dts/Makefile
 
 # Note: the following conditions must always be true:
 #   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
@@ -27,6 +28,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
@@ -55,6 +64,10 @@ $(obj)/zImage:       $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
        @$(kecho) '  Kernel: $@ is ready'
 
+$(obj)/zImage-dtb:     $(obj)/zImage $(DTB_OBJS) FORCE
+       $(call if_changed,cat)
+       @echo '  Kernel: $@ is ready'
+
 endif
 
 ifneq ($(LOADADDR),)
index 856913705169f089e55bcc41c1483e08a31a59db..d2e43b053d9b61b7ab90478d8c41041b9457baa0 100644 (file)
@@ -778,6 +778,8 @@ __armv7_mmu_cache_on:
                bic     r6, r6, #1 << 31        @ 32-bit translation system
                bic     r6, r6, #(7 << 0) | (1 << 4)    @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
+               mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
+               mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
                mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
index 30bbc3746130a56e54fa665a763894fe4ec02e6a..97d1b3719c755bbb8e273f6916ef7d6a472d8af3 100644 (file)
@@ -782,5 +782,15 @@ endif
 dtstree                := $(srctree)/$(src)
 dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
 
-always         := $(dtb-y)
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+
+targets += dtbs dtbs_install
+targets += $(DTB_LIST)
+
+always         := $(DTB_LIST)
 clean-files    := *.dtb
index 9353184d730dfda864c85ec180b906b9ed575681..ce01364a96e3b37ae4b7e2147515834131ca2922 100644 (file)
@@ -17,3 +17,7 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
        bool
+
+config FIQ_GLUE
+       bool
+       select FIQ
index 27f23b15b1ea272a227fa431c12a0266de7061fe..04aca896b3389e3405327b5cc29e680cbc2fa345 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-y                          += firmware.o
 
+obj-$(CONFIG_FIQ_GLUE)         += fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST)             += icst.o
 obj-$(CONFIG_SA1111)           += sa1111.o
 obj-$(CONFIG_DMABOUNCE)                += dmabounce.o
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644 (file)
index 0000000..24b42ce
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+               .text
+
+               .global fiq_glue_end
+
+               /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+               /* store pc, cpsr from previous mode, reserve space for spsr */
+               mrs     r12, spsr
+               sub     lr, lr, #4
+               subs    r10, #1
+               bne     nested_fiq
+
+               str     r12, [sp, #-8]!
+               str     lr, [sp, #-4]!
+
+               /* store r8-r14 from previous mode */
+               sub     sp, sp, #(7 * 4)
+               stmia   sp, {r8-r14}^
+               nop
+
+               /* store r0-r7 from previous mode */
+               stmfd   sp!, {r0-r7}
+
+               /* setup func(data,regs) arguments */
+               mov     r0, r9
+               mov     r1, sp
+               mov     r3, r8
+
+               mov     r7, sp
+
+               /* Get sp and lr from non-user modes */
+               and     r4, r12, #MODE_MASK
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode
+
+               mov     r7, sp
+               orr     r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+               msr     cpsr_c, r4
+               str     sp, [r7, #(4 * 13)]
+               str     lr, [r7, #(4 * 14)]
+               mrs     r5, spsr
+               str     r5, [r7, #(4 * 17)]
+
+               cmp     r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               /* use fiq stack if we reenter this mode */
+               subne   sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+               msr     cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               mov     r2, sp
+               sub     sp, r7, #12
+               stmfd   sp!, {r2, ip, lr}
+               /* call func(data,regs) */
+               blx     r3
+               ldmfd   sp, {r2, ip, lr}
+               mov     sp, r2
+
+               /* restore/discard saved state */
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode_exit
+
+               msr     cpsr_c, r4
+               ldr     sp, [r7, #(4 * 13)]
+               ldr     lr, [r7, #(4 * 14)]
+               msr     spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+               msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+               ldmfd   sp!, {r0-r7}
+               ldr     lr, [sp, #(4 * 7)]
+               ldr     r12, [sp, #(4 * 8)]
+               add     sp, sp, #(10 * 4)
+exit_fiq:
+               msr     spsr_cxsf, r12
+               add     r10, #1
+               cmp     r11, #0
+               moveqs  pc, lr
+               bx      r11 /* jump to custom fiq return function */
+
+nested_fiq:
+               orr     r12, r12, #(PSR_F_BIT)
+               b       exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+               stmfd           sp!, {r4}
+               mrs             r4, cpsr
+               msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+               movs            r8, r0
+               mov             r9, r1
+               mov             sp, r2
+               mov             r11, r3
+               moveq           r10, #0
+               movne           r10, #1
+               msr             cpsr_c, r4
+               ldmfd           sp!, {r4}
+               bx              lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644 (file)
index 0000000..8cb1b61
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+                          fiq_return_handler_t fiq_return_handler);
+
+static struct fiq_handler fiq_debbuger_fiq_handler = {
+       .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+       struct fiq_glue_handler *handler = info;
+       fiq_glue_setup(handler->fiq, handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+       int ret;
+       int cpu;
+
+       if (!handler || !handler->fiq)
+               return -EINVAL;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_stack) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+
+       for_each_possible_cpu(cpu) {
+               void *stack;
+               stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (WARN_ON(!stack)) {
+                       ret = -ENOMEM;
+                       goto err_alloc_fiq_stack;
+               }
+               per_cpu(fiq_stack, cpu) = stack;
+       }
+
+       ret = claim_fiq(&fiq_debbuger_fiq_handler);
+       if (WARN_ON(ret))
+               goto err_claim_fiq;
+
+       current_handler = handler;
+       on_each_cpu(fiq_glue_setup_helper, handler, true);
+       set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+       mutex_unlock(&fiq_glue_lock);
+       return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+       for_each_possible_cpu(cpu) {
+               __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
+               per_cpu(fiq_stack, cpu) = NULL;
+       }
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+       return ret;
+}
+
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+       fiq_return_handler = fiq_return;
+       if (current_handler)
+               on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_return_handler) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+       fiq_glue_update_return_handler(fiq_return);
+       ret = 0;
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (WARN_ON(fiq_return_handler != fiq_return)) {
+               ret = -EINVAL;
+               goto err_inval;
+       }
+       fiq_glue_update_return_handler(NULL);
+       ret = 0;
+err_inval:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+       if (!current_handler)
+               return;
+       fiq_glue_setup(current_handler->fiq, current_handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
+       if (current_handler->resume)
+               current_handler->resume(current_handler);
+}
+
diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig
new file mode 100644 (file)
index 0000000..35a90af
--- /dev/null
@@ -0,0 +1,315 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_ARCH_MMAP_RND_BITS=16
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_VIRT=y
+CONFIG_ARM_KERNMEM_PERMS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_KSM=y
+CONFIG_SECCOMP=y
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_VIRTUALIZATION=y
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644 (file)
index 0000000..a9e244f
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+       void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+       void (*resume)(struct fiq_glue_handler *h);
+};
+typedef void (*fiq_return_handler_t)(void);
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
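To make the new interface concrete, here is a hedged sketch of a hypothetical built-in driver registering a handler through the glue layer declared above. fiq_glue_register_handler() is not exported to modules, so the caller is assumed to be built in; every name other than the fiq_glue API itself is invented for illustration.

#include <linux/module.h>
#include <asm/fiq_glue.h>

static void example_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* Runs in FIQ context: keep it short, no sleeping, no locking. */
}

static void example_resume(struct fiq_glue_handler *h)
{
	/* Called via fiq_glue_resume() after a low power state; reprogram
	 * the FIQ source here if it lost state. */
}

static struct fiq_glue_handler example_handler = {
	.fiq	= example_fiq,
	.resume	= example_resume,
};

static int __init example_init(void)
{
	/* Claims the FIQ, allocates per-CPU FIQ stacks and installs the glue vector. */
	return fiq_glue_register_handler(&example_handler);
}
module_init(example_init);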
index fd929b5ded9e2eb1cb313af62451d0efdd4e2ede..9459ca85bd20c6301a490d2ff9a96446abd550c2 100644 (file)
@@ -250,6 +250,7 @@ PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
 PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
 PMD_BIT_FUNC(mkwrite,   &= ~L_PMD_SECT_RDONLY);
 PMD_BIT_FUNC(mkdirty,   |= L_PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkclean,   &= ~L_PMD_SECT_DIRTY);
 PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
index 370f7a732900ae12e8831e6f3ce7390d16455fc8..d0606412069492767c5f62158e4f9b24ee073570 100644 (file)
@@ -3,6 +3,7 @@
 
 #ifdef CONFIG_ARM_CPU_TOPOLOGY
 
+#include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 
 struct cputopo_arm {
@@ -24,6 +25,12 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+#ifdef CONFIG_CPU_FREQ
+#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
+#endif
+#define arch_scale_cpu_capacity scale_cpu_capacity
+extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+
 #else
 
 static inline void init_cpu_topology(void) { }
index 871b8267d211af0e5eaf9d5fb9f8329ae33696e1..a586cfe7b4e42ae0a4c235576b5eb5dce8235ad3 100644 (file)
@@ -10,7 +10,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
  * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
  *            (http://gcc.gnu.org/PR8896) and incorrect structure
  *           initialisation in fs/jffs2/erase.c
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- *           miscompiles find_get_entry(), and can result in EXT3 and EXT4
- *           filesystem corruption (possibly other FS too).
  */
-#ifdef __GNUC__
 #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 #error Your compiler is too buggy; it is known to miscompile kernels.
-#error    Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
+#error    Known good compilers: 3.3
 #endif
 
 int main(void)
index 9232caee70604c686e07b7aef4c21598c7196fa4..f3c662299531351d62c7e4081c6d3158778d5e05 100644 (file)
@@ -140,6 +140,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+       if (user_mode(regs))
+               return -1;
        kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
        return 0;
@@ -147,6 +149,8 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+       if (user_mode(regs))
+               return -1;
        compiled_break = 1;
        kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
index 4adfb46e3ee93276ea506b723eb11e33c57459f4..0017b3edfea8b71898ca504ee43edeaa2a5529a1 100644 (file)
@@ -80,6 +80,7 @@ void arch_cpu_idle_prepare(void)
 
 void arch_cpu_idle_enter(void)
 {
+       idle_notifier_call_chain(IDLE_START);
        ledtrig_cpu(CPU_LED_IDLE_START);
 #ifdef CONFIG_PL310_ERRATA_769419
        wmb();
@@ -89,6 +90,78 @@ void arch_cpu_idle_enter(void)
 void arch_cpu_idle_exit(void)
 {
        ledtrig_cpu(CPU_LED_IDLE_END);
+       idle_notifier_call_chain(IDLE_END);
+}
+
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+       show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+       show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+       show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+       show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+       show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+       show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+       show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+       show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+       show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+       show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+       show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+       show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+       show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+       show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+       set_fs(fs);
 }
 
 void __show_regs(struct pt_regs *regs)
@@ -178,6 +251,8 @@ void __show_regs(struct pt_regs *regs)
                printk("Control: %08x%s\n", ctrl, buf);
        }
 #endif
+
+       show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
index 38269358fd252c6bb93fd58a0478319c436cdfd3..1a06da8f0366d9b1056f220c26d9386219f09c60 100644 (file)
@@ -6,6 +6,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
@@ -124,6 +125,31 @@ void machine_power_off(void)
                pm_power_off();
 }
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+       printk("\n");
+       pr_emerg("Restarting %s\n", linux_banner);
+       if (console_trylock()) {
+               console_unlock();
+               return;
+       }
+
+       mdelay(50);
+
+       local_irq_disable();
+       if (!console_trylock())
+               pr_emerg("arm_restart: Console was locked! Busting\n");
+       else
+               pr_emerg("arm_restart: Console was locked!\n");
+       console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * Restart requires that the secondary CPUs stop performing any activity
  * while the primary CPU resets the system. Systems with a single CPU can
@@ -140,6 +166,10 @@ void machine_restart(char *cmd)
        local_irq_disable();
        smp_send_stop();
 
+       /* Flush the console to make sure all the relevant messages make it
+        * out to the console drivers */
+       arm_machine_flush_console();
+
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
        else
index 20edd349d379f22c583438db7fbf52f46e1133ef..bf63b4693457c21e84b8df86c3af69d3745930d8 100644 (file)
@@ -772,7 +772,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
        struct resource *res;
 
        kernel_code.start   = virt_to_phys(_text);
-       kernel_code.end     = virt_to_phys(_etext - 1);
+       kernel_code.end     = virt_to_phys(__init_begin - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);
 
index 08b7847bf9124f004d7214c0e9c4dae23c0fffd2..4f2c51ef162d4c6d0eff8b34ca7d96a2e39de117 100644 (file)
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale);
 
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
+#ifdef CONFIG_CPU_FREQ
+       unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);
+
+       return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
+#else
        return per_cpu(cpu_scale, cpu);
+#endif
 }
 
 static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
@@ -153,6 +159,8 @@ static void __init parse_dt_topology(void)
 
 }
 
+static const struct sched_group_energy * const cpu_core_energy(int cpu);
+
 /*
  * Look for a customed capacity of a CPU in the cpu_capacity table during the
  * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
@@ -160,10 +168,14 @@ static void __init parse_dt_topology(void)
  */
 static void update_cpu_capacity(unsigned int cpu)
 {
-       if (!cpu_capacity(cpu))
-               return;
+       unsigned long capacity = SCHED_CAPACITY_SCALE;
+
+       if (cpu_core_energy(cpu)) {
+               int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+               capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+       }
 
-       set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+       set_capacity_scale(cpu, capacity);
 
        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, arch_scale_cpu_capacity(NULL, cpu));
@@ -275,17 +287,138 @@ void store_cpu_topology(unsigned int cpuid)
                cpu_topology[cpuid].socket_id, mpidr);
 }
 
+/*
+ * ARM TC2 specific energy cost model data. There are no unit requirements for
+ * the data. Data can be normalized to any reference point, but the
+ * normalization must be consistent. That is, one bogo-joule/watt must be the
+ * same quantity for all data, but we don't care what it is.
+ */
+static struct idle_state idle_states_cluster_a7[] = {
+        { .power = 25 }, /* arch_cpu_idle() (active idle) = WFI */
+        { .power = 25 }, /* WFI */
+        { .power = 10 }, /* cluster-sleep-l */
+       };
+
+static struct idle_state idle_states_cluster_a15[] = {
+        { .power = 70 }, /* arch_cpu_idle() (active idle) = WFI */
+        { .power = 70 }, /* WFI */
+        { .power = 25 }, /* cluster-sleep-b */
+       };
+
+static struct capacity_state cap_states_cluster_a7[] = {
+       /* Cluster only power */
+        { .cap =  150, .power = 2967, }, /*  350 MHz */
+        { .cap =  172, .power = 2792, }, /*  400 MHz */
+        { .cap =  215, .power = 2810, }, /*  500 MHz */
+        { .cap =  258, .power = 2815, }, /*  600 MHz */
+        { .cap =  301, .power = 2919, }, /*  700 MHz */
+        { .cap =  344, .power = 2847, }, /*  800 MHz */
+        { .cap =  387, .power = 3917, }, /*  900 MHz */
+        { .cap =  430, .power = 4905, }, /* 1000 MHz */
+       };
+
+static struct capacity_state cap_states_cluster_a15[] = {
+       /* Cluster only power */
+        { .cap =  426, .power =  7920, }, /*  500 MHz */
+        { .cap =  512, .power =  8165, }, /*  600 MHz */
+        { .cap =  597, .power =  8172, }, /*  700 MHz */
+        { .cap =  682, .power =  8195, }, /*  800 MHz */
+        { .cap =  768, .power =  8265, }, /*  900 MHz */
+        { .cap =  853, .power =  8446, }, /* 1000 MHz */
+        { .cap =  938, .power = 11426, }, /* 1100 MHz */
+        { .cap = 1024, .power = 15200, }, /* 1200 MHz */
+       };
+
+static struct sched_group_energy energy_cluster_a7 = {
+         .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a7),
+         .idle_states    = idle_states_cluster_a7,
+         .nr_cap_states  = ARRAY_SIZE(cap_states_cluster_a7),
+         .cap_states     = cap_states_cluster_a7,
+};
+
+static struct sched_group_energy energy_cluster_a15 = {
+         .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a15),
+         .idle_states    = idle_states_cluster_a15,
+         .nr_cap_states  = ARRAY_SIZE(cap_states_cluster_a15),
+         .cap_states     = cap_states_cluster_a15,
+};
+
+static struct idle_state idle_states_core_a7[] = {
+        { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
+        { .power = 0 }, /* WFI */
+        { .power = 0 }, /* cluster-sleep-l */
+       };
+
+static struct idle_state idle_states_core_a15[] = {
+        { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
+        { .power = 0 }, /* WFI */
+        { .power = 0 }, /* cluster-sleep-b */
+       };
+
+static struct capacity_state cap_states_core_a7[] = {
+       /* Power per cpu */
+        { .cap =  150, .power =  187, }, /*  350 MHz */
+        { .cap =  172, .power =  275, }, /*  400 MHz */
+        { .cap =  215, .power =  334, }, /*  500 MHz */
+        { .cap =  258, .power =  407, }, /*  600 MHz */
+        { .cap =  301, .power =  447, }, /*  700 MHz */
+        { .cap =  344, .power =  549, }, /*  800 MHz */
+        { .cap =  387, .power =  761, }, /*  900 MHz */
+        { .cap =  430, .power = 1024, }, /* 1000 MHz */
+       };
+
+static struct capacity_state cap_states_core_a15[] = {
+       /* Power per cpu */
+        { .cap =  426, .power = 2021, }, /*  500 MHz */
+        { .cap =  512, .power = 2312, }, /*  600 MHz */
+        { .cap =  597, .power = 2756, }, /*  700 MHz */
+        { .cap =  682, .power = 3125, }, /*  800 MHz */
+        { .cap =  768, .power = 3524, }, /*  900 MHz */
+        { .cap =  853, .power = 3846, }, /* 1000 MHz */
+        { .cap =  938, .power = 5177, }, /* 1100 MHz */
+        { .cap = 1024, .power = 6997, }, /* 1200 MHz */
+       };
+
+static struct sched_group_energy energy_core_a7 = {
+         .nr_idle_states = ARRAY_SIZE(idle_states_core_a7),
+         .idle_states    = idle_states_core_a7,
+         .nr_cap_states  = ARRAY_SIZE(cap_states_core_a7),
+         .cap_states     = cap_states_core_a7,
+};
+
+static struct sched_group_energy energy_core_a15 = {
+         .nr_idle_states = ARRAY_SIZE(idle_states_core_a15),
+         .idle_states    = idle_states_core_a15,
+         .nr_cap_states  = ARRAY_SIZE(cap_states_core_a15),
+         .cap_states     = cap_states_core_a15,
+};
+
+/* sd energy functions */
+static inline
+const struct sched_group_energy * const cpu_cluster_energy(int cpu)
+{
+       return cpu_topology[cpu].socket_id ? &energy_cluster_a7 :
+                       &energy_cluster_a15;
+}
+
+static inline
+const struct sched_group_energy * const cpu_core_energy(int cpu)
+{
+       return cpu_topology[cpu].socket_id ? &energy_core_a7 :
+                       &energy_core_a15;
+}
+
 static inline int cpu_corepower_flags(void)
 {
-       return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN;
+       return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN | \
+              SD_SHARE_CAP_STATES;
 }
 
 static struct sched_domain_topology_level arm_topology[] = {
 #ifdef CONFIG_SCHED_MC
-       { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
-       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+       { cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
 #endif
-       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
        { NULL, },
 };
 
index 8b60fde5ce48a628e5d1f2c682d2aa0684ed6642..be2ab6d3b91f67d6b04e0d446776f5475eac4c7b 100644 (file)
@@ -120,6 +120,8 @@ SECTIONS
 #ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
+       _etext = .;                     /* End of text section */
+
        RO_DATA(PAGE_SIZE)
 
        . = ALIGN(4);
@@ -150,8 +152,6 @@ SECTIONS
 
        NOTES
 
-       _etext = .;                     /* End of text and rodata section */
-
 #ifndef CONFIG_XIP_KERNEL
 # ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
index 24659952c2784de64a53dc2e889ab616bd19b12b..11da0f50a1fef80c0789e653f8307ccee434e5c6 100644 (file)
@@ -270,6 +270,11 @@ v6_dma_clean_range:
  *     - end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       sub     r2, r1, r0
+       cmp     r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       bhi     v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                @ read for ownership
        strb    r2, [r0]                @ write for ownership
@@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        ret     lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+       mov     r0, #0
+#ifdef HARVARD_CACHE
+       mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
+#else
+       mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
+#endif
+       mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
+       mov     pc, lr
+#endif
+
 /*
  *     dma_map_area(start, size, dir)
  *     - start - kernel virtual start address
index daafcf121ce082aa0a0fbb43be3e3712b2942f3e..506c225c66cc22def94ea2bf36653a2d6c870797 100644 (file)
@@ -273,10 +273,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                local_irq_enable();
 
        /*
-        * If we're in an interrupt or have no user
+        * If we're in an interrupt, or have no irqs, or have no user
         * context, we must not take the fault..
         */
-       if (faulthandler_disabled() || !mm)
+       if (faulthandler_disabled() || irqs_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index 407dc786583aec0e6077f50a8374f99002ca8233..66353caa35b9f78fa2aa4754dea3ce813593303f 100644 (file)
@@ -173,8 +173,7 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       /* 8 bits of randomness in 20 address space bits */
-       rnd = (unsigned long)get_random_int() % (1 << 8);
+       rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 
        return rnd << PAGE_SHIFT;
 }
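The hunk above replaces the fixed 8 bits of mmap randomness with the mmap_rnd_bits tunable; the window the base can land in is simply (1 << mmap_rnd_bits) pages. A small illustrative helper (not part of the patch) with two worked values for 4 KiB pages:

/* Illustration only: span covered by the new arch_mmap_rnd() above. */
unsigned long mmap_rnd_span(unsigned int mmap_rnd_bits, unsigned int page_shift)
{
	return (1UL << mmap_rnd_bits) << page_shift;
}
/* mmap_rnd_bits =  8, PAGE_SHIFT = 12:  (1 <<  8) << 12 =   1 MiB */
/* mmap_rnd_bits = 16, PAGE_SHIFT = 12:  (1 << 16) << 12 = 256 MiB */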
index 4867f5daf82c99bdf4ee64ebb6b61613cd792a3e..de9f8921e4072b9e7db1fb3113548719913b3d61 100644 (file)
@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
         * in the Short-descriptor translation table format descriptors.
         */
        if (cpu_arch == CPU_ARCH_ARMv7 &&
-               (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+               (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
                user_pmd_table |= PMD_PXNTABLE;
        }
 #endif
index 3510b01acc8cab9486d1f88bcb4076b6122941e4..2543791ce8c2ea95903a850e794effca827358a4 100644 (file)
@@ -54,6 +54,8 @@ config ARM64
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_MMAP_RND_BITS
+       select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
@@ -112,9 +114,40 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
        def_bool y
 
+config ARCH_MMAP_RND_BITS_MIN
+       default 14 if ARM64_64K_PAGES
+       default 16 if ARM64_16K_PAGES
+       default 18
+
+# max bits determined by the following formula:
+#  VA_BITS - PAGE_SHIFT - 3
+config ARCH_MMAP_RND_BITS_MAX
+       default 19 if ARM64_VA_BITS=36
+       default 24 if ARM64_VA_BITS=39
+       default 27 if ARM64_VA_BITS=42
+       default 30 if ARM64_VA_BITS=47
+       default 29 if ARM64_VA_BITS=48 && ARM64_64K_PAGES
+       default 31 if ARM64_VA_BITS=48 && ARM64_16K_PAGES
+       default 33 if ARM64_VA_BITS=48
+       default 14 if ARM64_64K_PAGES
+       default 16 if ARM64_16K_PAGES
+       default 18
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       default 7 if ARM64_64K_PAGES
+       default 9 if ARM64_16K_PAGES
+       default 11
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default 16
+
 config NO_IOPORT_MAP
        def_bool y if !PCI
 
+config ILLEGAL_POINTER_VALUE
+       hex
+       default 0xdead000000000000
+
 config STACKTRACE_SUPPORT
        def_bool y
 
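As a check on the ARCH_MMAP_RND_BITS_MAX defaults in the hunk above, the VA_BITS - PAGE_SHIFT - 3 bound noted there can be worked through for a few common page-size/VA-size combinations (page shifts of 12/14/16 for 4 KiB/16 KiB/64 KiB pages); the macro below is purely illustrative:

/* Illustration only -- the bound from the comment above. */
#define mmap_rnd_bits_max(va_bits, page_shift)	((va_bits) - (page_shift) - 3)
/* 39-bit VAs, 4 KiB pages:  39 - 12 - 3 = 24 */
/* 36-bit VAs, 16 KiB pages: 36 - 14 - 3 = 19 */
/* 48-bit VAs, 64 KiB pages: 48 - 16 - 3 = 29 */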
@@ -686,6 +719,14 @@ config SETEND_EMULATION
          If unsure, say Y
 endif
 
+config ARM64_SW_TTBR0_PAN
+       bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+       help
+         Enabling this option prevents the kernel from accessing
+         user-space memory directly by pointing TTBR0_EL1 to a reserved
+         zeroed area and reserved ASID. The user access routines
+         restore the valid TTBR0_EL1 temporarily.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
@@ -775,7 +816,7 @@ config RELOCATABLE
 
 config RANDOMIZE_BASE
        bool "Randomize the address of the kernel image"
-       select ARM64_MODULE_PLTS
+       select ARM64_MODULE_PLTS if MODULES
        select RELOCATABLE
        help
          Randomizes the virtual address at which the kernel image is
@@ -794,7 +835,7 @@ config RANDOMIZE_BASE
 
 config RANDOMIZE_MODULE_REGION_FULL
        bool "Randomize the module region independently from the core kernel"
-       depends on RANDOMIZE_BASE
+       depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
        default y
        help
          Randomizes the location of the module region without considering the
@@ -828,6 +869,23 @@ config CMDLINE
          entering them here. As a minimum, you should specify the the
          root device (e.g. root=/dev/nfs).
 
+choice
+       prompt "Kernel command line type" if CMDLINE != ""
+       default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+       bool "Use bootloader kernel arguments if available"
+       help
+         Uses the command-line options passed by the boot loader. If
+         the boot loader doesn't provide any, the default kernel command
+         string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+       bool "Extend bootloader kernel arguments"
+       help
+         The command-line arguments provided by the boot loader will be
+         appended to the default kernel command string.
+
 config CMDLINE_FORCE
        bool "Always use the default kernel command string"
        help
@@ -835,6 +893,7 @@ config CMDLINE_FORCE
          loader passes other arguments to the kernel.
          This is useful if you cannot or don't want to change the
          command-line options your boot loader passes to the kernel.
+endchoice
 
 config EFI_STUB
        bool
@@ -867,6 +926,21 @@ config DMI
          However, even with this option, the resultant kernel should
          continue to boot on existing non-UEFI platforms.
 
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+       bool "Build a concatenated Image.gz/dtb by default"
+       depends on OF
+       help
+         Enabling this option will cause a concatenated Image.gz and list of
+         DTBs to be built by default (instead of a standalone Image.gz.)
+         The image will be built in arch/arm64/boot/Image.gz-dtb
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+       string "Default dtb names"
+       depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+       help
+         Space separated list of names of dtbs to append when
+         building a concatenated Image.gz-dtb.
+
 endmenu
 
 menu "Userspace binary formats"
index 0a9bf4500852e988688cccb4f848cadecfe47ea6..101632379b8b054dd5186616c9dba550511f6767 100644 (file)
@@ -31,6 +31,7 @@ $(warning LSE atomics not supported by binutils)
 endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS  += -fno-pic
 KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
 KBUILD_AFLAGS  += $(lseinstr)
@@ -83,7 +84,12 @@ libs-y               := arch/arm64/lib/ $(libs-y)
 core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE   := Image.gz-dtb
+else
 KBUILD_IMAGE   := Image.gz
+endif
+
 KBUILD_DTBS    := dtbs
 
 all:   $(KBUILD_IMAGE) $(KBUILD_DTBS)
@@ -110,6 +116,9 @@ dtbs: prepare scripts
 dtbs_install:
        $(Q)$(MAKE) $(dtbinst)=$(boot)/dts
 
+Image-dtb Image.gz-dtb: vmlinux scripts dtbs
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
index 8dab0bb6ae667c5f89da8aca74a0a3bb61dcfaa9..34e35209fc2ed7b8d21d1306e15fe6563181952c 100644 (file)
@@ -1,2 +1,4 @@
 Image
+Image-dtb
 Image.gz
+Image.gz-dtb
index abcbba2f01baad4c76f0c414372ba4713beafa11..7ab8e74cd83acf1521805f593fdc4245c74a1b41 100644 (file)
 # Based on the ia64 boot/Makefile.
 #
 
+include $(srctree)/arch/arm64/boot/dts/Makefile
+
 targets := Image Image.gz
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
 
 $(obj)/Image.bz2: $(obj)/Image FORCE
        $(call if_changed,bzip2)
 
+$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE
+       $(call if_changed,cat)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
        $(call if_changed,gzip)
 
@@ -34,6 +47,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
 $(obj)/Image.lzo: $(obj)/Image FORCE
        $(call if_changed,lzo)
 
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+       $(call if_changed,cat)
+
 install: $(obj)/Image
        $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
        $(obj)/Image System.map "$(INSTALL_PATH)"
index eb3c42d971750372d3194cf935f9148b9892f53c..062edb21099374db93a886c90082f26db4fee269 100644 (file)
@@ -21,3 +21,17 @@ dtstree              := $(srctree)/$(src)
 dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
 
 always         := $(dtb-y)
+
+targets += dtbs
+
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+targets += $(DTB_LIST)
+
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+
+clean-files := dts/*.dtb *.dtb
diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig
new file mode 100644 (file)
index 0000000..00eb346
--- /dev/null
@@ -0,0 +1,311 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_MMAP_RND_BITS=24
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_NR_CPUS=4
+CONFIG_PREEMPT=y
+CONFIG_KSM=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMC91X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_FTRACE is not set
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
index beccbdefa106a4ba0dd7d98537427a37d2c26517..8746ff6abd7782210abe56c56b549e0c4e67ac68 100644 (file)
@@ -95,13 +95,11 @@ void apply_alternatives(void *start, size_t length);
  * The code that follows this macro will be assembled and linked as
  * normal. There are no restrictions on this code.
  */
-.macro alternative_if_not cap, enable = 1
-       .if \enable
+.macro alternative_if_not cap
        .pushsection .altinstructions, "a"
        altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
        .popsection
 661:
-       .endif
 .endm
 
 /*
@@ -118,27 +116,27 @@ void apply_alternatives(void *start, size_t length);
  *    alternative sequence it is defined in (branches into an
  *    alternative sequence are not fixed up).
  */
-.macro alternative_else, enable = 1
-       .if \enable
+.macro alternative_else
 662:   .pushsection .altinstr_replacement, "ax"
 663:
-       .endif
 .endm
 
 /*
  * Complete an alternative code sequence.
  */
-.macro alternative_endif, enable = 1
-       .if \enable
+.macro alternative_endif
 664:   .popsection
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
-       .endif
 .endm
 
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)  \
        alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
 
+.macro user_alt, label, oldinstr, newinstr, cond
+9999:  alternative_insn "\oldinstr", "\newinstr", \cond
+       _ASM_EXTABLE 9999b, \label
+.endm
 
 /*
  * Generate the assembly for UAO alternatives with exception table entries.
index 290e13428f4a1c6b1b9425938a80dcd69460e592..aeb4554b3af386f215ba9fbe75f0be671b13d8eb 100644 (file)
@@ -24,6 +24,7 @@
 #define __ASM_ASSEMBLER_H
 
 #include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
 #include <asm/page.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
        msr     daifclr, #2
        .endm
 
+       .macro  save_and_disable_irq, flags
+       mrs     \flags, daif
+       msr     daifset, #2
+       .endm
+
+       .macro  restore_irq, flags
+       msr     daif, \flags
+       .endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -273,7 +283,16 @@ lr .req    x30             // link register
        add     \size, \kaddr, \size
        sub     \tmp2, \tmp1, #1
        bic     \kaddr, \kaddr, \tmp2
-9998:  dc      \op, \kaddr
+9998:
+       .if     (\op == cvau || \op == cvac)
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+       dc      \op, \kaddr
+alternative_else
+       dc      civac, \kaddr
+alternative_endif
+       .else
+       dc      \op, \kaddr
+       .endif
        add     \kaddr, \kaddr, \tmp1
        cmp     \kaddr, \size
        b.lo    9998b
@@ -352,4 +371,28 @@ lr .req    x30             // link register
        movk    \reg, :abs_g0_nc:\val
        .endm
 
+/*
+ * Return the current thread_info.
+ */
+       .macro  get_thread_info, rd
+       mrs     \rd, sp_el0
+       .endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+       .macro  post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
+       nop
+       nop
+       nop
+alternative_else
+       ic      iallu
+       dsb     nsh
+       isb
+alternative_endif
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
index 308d96eaeeaeb0e8fc2f3e54c6a11fb3882a15a6..46ee050ab747c16b1b3ec2c5ba5ee34d0e516f0c 100644 (file)
@@ -189,6 +189,12 @@ static inline bool system_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+       return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+               !cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
index ef572206f1c3eb6658618b40568e662f7e1865fc..932f5a56d1a60d47ffc7b8c05ce2e67ea7374e7a 100644 (file)
@@ -1,8 +1,11 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
+#include <asm/cpufeature.h>
 #include <asm/io.h>
+#include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
@@ -10,6 +13,8 @@ extern void efi_init(void);
 #define efi_init()
 #endif
 
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
 #define efi_call_virt(f, ...)                                          \
 ({                                                                     \
        efi_##f##_t *__f;                                               \
@@ -63,6 +68,34 @@ extern void efi_init(void);
  *   Services are enabled and the EFI_RUNTIME_SERVICES bit set.
  */
 
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+       __switch_mm(mm);
+
+       if (system_uses_ttbr0_pan()) {
+               if (mm != current->active_mm) {
+                       /*
+                        * Update the current thread's saved ttbr0 since it is
+                        * restored as part of a return from exception. Set
+                        * the hardware TTBR0_EL1 using cpu_switch_mm()
+                        * directly to enable potential errata workarounds.
+                        */
+                       update_saved_ttbr0(current, mm);
+                       cpu_switch_mm(mm->pgd, mm);
+               } else {
+                       /*
+                        * Defer the switch to the current thread's TTBR0_EL1
+                        * until uaccess_enable(). Restore the current
+                        * thread's saved ttbr0 corresponding to its active_mm
+                        * (if different from init_mm).
+                        */
+                       cpu_set_reserved_ttbr0();
+                       if (current->active_mm != &init_mm)
+                               update_saved_ttbr0(current, current->active_mm);
+               }
+       }
+}
+
 void efi_virtmap_load(void);
 void efi_virtmap_unload(void);
 
index 77eeb2cc648fd79d28cfc452c7373b6ebe04071b..f772e15c47663f0aafa1b287e8b9ac6aa3a55d2c 100644 (file)
@@ -74,6 +74,7 @@
 
 #define ESR_ELx_EC_SHIFT       (26)
 #define ESR_ELx_EC_MASK                (UL(0x3F) << ESR_ELx_EC_SHIFT)
+#define ESR_ELx_EC(esr)                (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
 
 #define ESR_ELx_IL             (UL(1) << 25)
 #define ESR_ELx_ISS_MASK       (ESR_ELx_IL - 1)
index f2585cdd32c29832566718e99d7b5fd9c61d2322..71dfa3b4231364363e08bd474ea474cc7fffd89c 100644 (file)
@@ -27,9 +27,9 @@
 #include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)                \
+do {                                                                   \
+       uaccess_enable();                                               \
        asm volatile(                                                   \
-       ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,            \
-                   CONFIG_ARM64_PAN)                                   \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
 "      .popsection\n"                                                  \
        _ASM_EXTABLE(1b, 4b)                                            \
        _ASM_EXTABLE(2b, 4b)                                            \
-       ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,            \
-                   CONFIG_ARM64_PAN)                                   \
        : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)       \
        : "r" (oparg), "Ir" (-EFAULT)                                   \
-       : "memory")
+       : "memory");                                                    \
+       uaccess_disable();                                              \
+} while (0)
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +118,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
+       uaccess_enable();
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "      prfm    pstl1strm, %2\n"
 "1:    ldxr    %w1, %2\n"
 "      sub     %w3, %w1, %w4\n"
@@ -134,10 +134,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "      .popsection\n"
        _ASM_EXTABLE(1b, 4b)
        _ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
        : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
        : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
        : "memory");
+       uaccess_disable();
 
        *uval = val;
        return ret;
index 5c6375d8528bb8ddd313bfa2911f7a0d77819028..7803343e5881fbd7b2f635b25082d3e91d2583f8 100644 (file)
@@ -19,6 +19,8 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
+#include <asm/sparsemem.h>
 
 /*
  * The linear mapping and the start of memory are both 2M aligned (per
 #define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE    (PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE    (0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT    SECTION_SHIFT
  * (64k granule), or a multiple that can be mapped using contiguous bits
  * in the page tables: 32 * PMD_SIZE (16k granule)
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define ARM64_MEMSTART_ALIGN   SZ_512M
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT           PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT           (PMD_SHIFT + 5)
+#else
+#define ARM64_MEMSTART_SHIFT           PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence with the base of the linear region, and needs
+ * to appear sufficiently aligned in the virtual address space.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN   (1UL << SECTION_SIZE_BITS)
 #else
-#define ARM64_MEMSTART_ALIGN   SZ_1G
+#define ARM64_MEMSTART_ALIGN   (1UL << ARM64_MEMSTART_SHIFT)
 #endif
 
 #endif /* __ASM_KERNEL_PGTABLE_H */
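
As a worked example of the new alignment logic (illustrative values, assuming the usual arm64 PUD_SHIFT/PMD_SHIFT and SECTION_SIZE_BITS = 30):

    4K granule:  ARM64_MEMSTART_SHIFT = PUD_SHIFT     = 30  -> 1 GB alignment
    16K granule: ARM64_MEMSTART_SHIFT = PMD_SHIFT + 5 = 30  -> 1 GB alignment
    64K granule: ARM64_MEMSTART_SHIFT = PMD_SHIFT     = 29  -> 512 MB alignment,
                 rounded up to 1 << SECTION_SIZE_BITS (1 GB) when
                 SPARSEMEM_VMEMMAP is enabled

This reproduces the old SZ_1G/SZ_512M values, except that the vmemmap case now forces section-size alignment.
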
index d776037d199fa4ca0257ba6cf5beda36d001d6e2..ae11e8fdbfd2b29d7e326683cb49cd6a325ef39f 100644 (file)
@@ -196,7 +196,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET                ((unsigned long)PHYS_PFN_OFFSET)
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define        virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define _virt_addr_is_linear(kaddr)    (((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)         (_virt_addr_is_linear(kaddr) && \
+                                        _virt_addr_valid(kaddr))
 
 #endif
 
index a00f7cf35bbd4d80ce045bfeb0cbb6bd061aeaaa..4a32fd5f101dd5ec14f1fcc7d3e922c0cdd25c5f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -113,7 +114,7 @@ static inline void cpu_uninstall_idmap(void)
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 
-       if (mm != &init_mm)
+       if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -173,20 +174,26 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-         struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+                                     struct mm_struct *mm)
 {
-       unsigned int cpu = smp_processor_id();
+       if (system_uses_ttbr0_pan()) {
+               BUG_ON(mm->pgd == swapper_pg_dir);
+               task_thread_info(tsk)->ttbr0 =
+                       virt_to_phys(mm->pgd) | ASID(mm) << 48;
+       }
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+                                     struct mm_struct *mm)
+{
+}
+#endif
 
-       if (prev == next)
-               return;
+static inline void __switch_mm(struct mm_struct *next)
+{
+       unsigned int cpu = smp_processor_id();
 
        /*
         * init_mm.pgd does not contain any user mappings and it is always
@@ -200,7 +207,23 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
        check_and_switch_context(next, cpu);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+         struct task_struct *tsk)
+{
+       if (prev != next)
+               __switch_mm(next);
+
+       /*
+        * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+        * value may not have been initialised yet (activate_mm caller) or the
+        * ASID has changed since the last run (following the context switch
+        * of another thread of the same process).
+        */
+       update_saved_ttbr0(tsk, next);
+}
+
 #define deactivate_mm(tsk,mm)  do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+#define activate_mm(prev,next) switch_mm(prev, next, current)
 
 #endif
index e12af6754634b3d2aa031ae23ce25228dc766cfb..06ff7fd9e81feab27bb67f1a4af971ddc0ebf4cc 100644 (file)
@@ -17,6 +17,7 @@
 #define __ASM_MODULE_H
 
 #include <asm-generic/module.h>
+#include <asm/memory.h>
 
 #define MODULE_ARCH_VERMAGIC   "aarch64"
 
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
                          Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start            (kimage_vaddr - KIMAGE_VADDR)
+#endif
 extern u64 module_alloc_base;
 #else
 #define module_alloc_base      ((u64)_etext - MODULES_VSIZE)
index 1528d52eb8c0db9365df4e85c91b13524db3375b..5eedfd83acc772ae0324e41ffc965093829f1a6f 100644 (file)
@@ -21,6 +21,8 @@
 
 #include <uapi/asm/ptrace.h>
 
+#define _PSR_PAN_BIT           22
+
 /* Current Exception Level values, as contained in CurrentEL */
 #define CurrentEL_EL1          (1 << 2)
 #define CurrentEL_EL2          (2 << 2)
index 53ee219e76a735a8824df8b8b472ffb9febef29e..43a66881fd57c1b753639aee87958a6b347d4675 100644 (file)
@@ -37,13 +37,17 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 "2:    ldaxr   %w0, %2\n"
 "      eor     %w1, %w0, %w0, ror #16\n"
 "      cbnz    %w1, 1b\n"
+       /* Serialise against any concurrent lockers */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
 "      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 2b\n", /* Serialise against any concurrent lockers */
-       /* LSE atomics */
 "      nop\n"
-"      nop\n")
+"      nop\n",
+       /* LSE atomics */
+"      mov     %w1, %w0\n"
+"      cas     %w0, %w0, %2\n"
+"      eor     %w1, %w1, %w0\n")
+"      cbnz    %w1, 2b\n"
        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
        :
        : "memory");
index abd64bd1f6d9f0160a3122555cf23be1a30f87eb..b3325a9cb90fda6c9fb052e560a351d469c83fda 100644 (file)
@@ -47,6 +47,9 @@ typedef unsigned long mm_segment_t;
 struct thread_info {
        unsigned long           flags;          /* low level flags */
        mm_segment_t            addr_limit;     /* address limit */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       u64                     ttbr0;          /* saved TTBR0_EL1 */
+#endif
        struct task_struct      *task;          /* main task structure */
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
        int                     cpu;            /* cpu */
index a3e9d6fdbf2136a69f6bab5165b725b819a49e65..bbd362cd1ed1690e8a0ee0072ac235690d08e5a4 100644 (file)
@@ -22,6 +22,15 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+struct sched_domain;
+#ifdef CONFIG_CPU_FREQ
+#define arch_scale_freq_capacity cpufreq_scale_freq_capacity
+extern unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+extern unsigned long cpufreq_scale_max_freq_capacity(int cpu);
+#endif
+#define arch_scale_cpu_capacity scale_cpu_capacity
+extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
index c3d445b42351e1a529d94c4ba44e9c4d37544821..c37c064d7cddd62ab5c3334ff89e3077db3ffa91 100644 (file)
@@ -18,6 +18,8 @@
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H
 
+#ifndef __ASSEMBLY__
+
 /*
  * User space memory access functions
  */
@@ -26,6 +28,7 @@
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 #include <asm/errno.h>
@@ -123,6 +126,85 @@ static inline void set_fs(mm_segment_t fs)
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"
 
+/*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void uaccess_ttbr0_disable(void)
+{
+       unsigned long ttbr;
+
+       /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+       ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+       write_sysreg(ttbr, ttbr0_el1);
+       isb();
+}
+
+static inline void uaccess_ttbr0_enable(void)
+{
+       unsigned long flags;
+
+       /*
+        * Disable interrupts to avoid preemption between reading the 'ttbr0'
+        * variable and the MSR. A context switch could trigger an ASID
+        * roll-over and an update of 'ttbr0'.
+        */
+       local_irq_save(flags);
+       write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+       isb();
+       local_irq_restore(flags);
+}
+#else
+static inline void uaccess_ttbr0_disable(void)
+{
+}
+
+static inline void uaccess_ttbr0_enable(void)
+{
+}
+#endif
+
+#define __uaccess_disable(alt)                                         \
+do {                                                                   \
+       if (system_uses_ttbr0_pan())                                    \
+               uaccess_ttbr0_disable();                                \
+       else                                                            \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
+                               CONFIG_ARM64_PAN));                     \
+} while (0)
+
+#define __uaccess_enable(alt)                                          \
+do {                                                                   \
+       if (system_uses_ttbr0_pan())                                    \
+               uaccess_ttbr0_enable();                                 \
+       else                                                            \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
+                               CONFIG_ARM64_PAN));                     \
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+       __uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+       __uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+       __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+       __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
 /*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
@@ -150,8 +232,7 @@ static inline void set_fs(mm_segment_t fs)
 do {                                                                   \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-                       CONFIG_ARM64_PAN));                             \
+       uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
@@ -172,9 +253,8 @@ do {                                                                        \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
+       uaccess_disable_not_uao();                                      \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-                       CONFIG_ARM64_PAN));                             \
 } while (0)
 
 #define __get_user(x, ptr)                                             \
@@ -219,8 +299,7 @@ do {                                                                        \
 do {                                                                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-                       CONFIG_ARM64_PAN));                             \
+       uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),  \
@@ -241,8 +320,7 @@ do {                                                                        \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-                       CONFIG_ARM64_PAN));                             \
+       uaccess_disable_not_uao();                                      \
 } while (0)
 
 #define __put_user(x, ptr)                                             \
@@ -327,4 +405,73 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+#else  /* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/kernel-pgtable.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+       .macro  uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1
+       get_thread_info \tmp1
+       ldr     \tmp1, [\tmp1, #TI_TTBR0]       // load saved TTBR0_EL1
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+       .macro  uaccess_disable_not_uao, tmp1
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+       uaccess_ttbr0_disable \tmp1
+alternative_else
+       nop
+       nop
+       nop
+       nop
+alternative_endif
+#endif
+alternative_if_not ARM64_ALT_PAN_NOT_UAO
+       nop
+alternative_else
+       SET_PSTATE_PAN(1)
+alternative_endif
+       .endm
+
+       .macro  uaccess_enable_not_uao, tmp1, tmp2
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+       save_and_disable_irq \tmp2              // avoid preemption
+       uaccess_ttbr0_enable \tmp1
+       restore_irq \tmp2
+alternative_else
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+alternative_endif
+#endif
+alternative_if_not ARM64_ALT_PAN_NOT_UAO
+       nop
+alternative_else
+       SET_PSTATE_PAN(0)
+alternative_endif
+       .endm
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_UACCESS_H */
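
Taken together, uaccess_enable_not_uao()/uaccess_disable_not_uao() give every user accessor the same bracketing shape, whether the underlying mechanism is the PSTATE.PAN toggle or the SW TTBR0 switch. A free-standing sketch of that shape only — the stubs below mark where the real calls sit and are not the kernel implementation:

#include <string.h>

static void uaccess_enable_not_uao(void)  { /* PAN clear or TTBR0 restore */ }
static void uaccess_disable_not_uao(void) { /* PAN set or reserved TTBR0 */ }

/* Bracket the user access, mirroring the pattern now used by
 * __get_user/__put_user and the copy routines. */
static int copy_from_user_sketch(void *to, const void *from, size_t n)
{
	uaccess_enable_not_uao();
	memcpy(to, from, n);	/* stands in for the unprivileged loads */
	uaccess_disable_not_uao();
	return 0;
}
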
index 29348947652985e1556e4c7225cbfd17774a4d87..a0a0f2b20608bad8b9e92e23102c0a93913dc388 100644 (file)
@@ -281,9 +281,9 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
  * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
  */
 #define __user_swpX_asm(data, addr, res, temp, B)              \
+do {                                                           \
+       uaccess_enable();                                       \
        __asm__ __volatile__(                                   \
-       ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,    \
-                   CONFIG_ARM64_PAN)                           \
        "0:     ldxr"B"         %w2, [%3]\n"                    \
        "1:     stxr"B"         %w0, %w1, [%3]\n"               \
        "       cbz             %w0, 2f\n"                      \
@@ -299,11 +299,11 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
        "       .popsection"                                    \
        _ASM_EXTABLE(0b, 4b)                                    \
        _ASM_EXTABLE(1b, 4b)                                    \
-       ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
-               CONFIG_ARM64_PAN)                               \
        : "=&r" (res), "+r" (data), "=&r" (temp)                \
        : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)              \
-       : "memory")
+       : "memory");                                            \
+       uaccess_disable();                                      \
+} while (0)
 
 #define __user_swp_asm(data, addr, res, temp) \
        __user_swpX_asm(data, addr, res, temp, "")
index 2bb17bd556f8dd5114e52b7fec90bf926cd012c7..dac70c160289569782878ac82c3525f37afd71b5 100644 (file)
@@ -38,6 +38,9 @@ int main(void)
   DEFINE(TI_FLAGS,             offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,           offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,                offsetof(struct thread_info, addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TI_TTBR0,             offsetof(struct thread_info, ttbr0));
+#endif
   DEFINE(TI_TASK,              offsetof(struct thread_info, task));
   DEFINE(TI_CPU,               offsetof(struct thread_info, cpu));
   BLANK();
index 24ecbeb733ed354f24be2d11fcfb9eba3ddd939c..a0c41dae0d8118d5b9c888299cad0895f0ac9036 100644 (file)
@@ -44,6 +44,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        {                                               \
index 4eeb17198cfaf598fd403b1e4040c77f5ee3068a..b6abc852f2a142123150662bc6dd6bf5c3de62af 100644 (file)
  *
  */
 
-#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
-#include <linux/export.h>
-#include <linux/memblock.h>
-#include <linux/mm_types.h>
-#include <linux/bootmem.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/preempt.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
 
-#include <asm/cacheflush.h>
 #include <asm/efi.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
 
-struct efi_memory_map memmap;
-
-static u64 efi_system_table;
-
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
-       .mm_rb                  = RB_ROOT,
-       .pgd                    = efi_pgd,
-       .mm_users               = ATOMIC_INIT(2),
-       .mm_count               = ATOMIC_INIT(1),
-       .mmap_sem               = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
-       .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
-       .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
-};
-
-static int __init is_normal_ram(efi_memory_desc_t *md)
-{
-       if (md->attribute & EFI_MEMORY_WB)
-               return 1;
-       return 0;
-}
-
-/*
- * Translate a EFI virtual address into a physical address: this is necessary,
- * as some data members of the EFI system table are virtually remapped after
- * SetVirtualAddressMap() has been called.
- */
-static phys_addr_t efi_to_phys(unsigned long addr)
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
-       efi_memory_desc_t *md;
-
-       for_each_efi_memory_desc(&memmap, md) {
-               if (!(md->attribute & EFI_MEMORY_RUNTIME))
-                       continue;
-               if (md->virt_addr == 0)
-                       /* no virtual mapping has been installed by the stub */
-                       break;
-               if (md->virt_addr <= addr &&
-                   (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
-                       return md->phys_addr + addr - md->virt_addr;
-       }
-       return addr;
-}
-
-static int __init uefi_init(void)
-{
-       efi_char16_t *c16;
-       void *config_tables;
-       u64 table_size;
-       char vendor[100] = "unknown";
-       int i, retval;
-
-       efi.systab = early_memremap(efi_system_table,
-                                   sizeof(efi_system_table_t));
-       if (efi.systab == NULL) {
-               pr_warn("Unable to map EFI system table.\n");
-               return -ENOMEM;
-       }
-
-       set_bit(EFI_BOOT, &efi.flags);
-       set_bit(EFI_64BIT, &efi.flags);
+       pteval_t prot_val;
 
        /*
-        * Verify the EFI Table
+        * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+        * executable, everything else can be mapped with the XN bits
+        * set.
         */
-       if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
-               pr_err("System table signature incorrect\n");
-               retval = -EINVAL;
-               goto out;
-       }
-       if ((efi.systab->hdr.revision >> 16) < 2)
-               pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
-                       efi.systab->hdr.revision >> 16,
-                       efi.systab->hdr.revision & 0xffff);
-
-       /* Show what we know for posterity */
-       c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-                            sizeof(vendor) * sizeof(efi_char16_t));
-       if (c16) {
-               for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
-                       vendor[i] = c16[i];
-               vendor[i] = '\0';
-               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
-       }
-
-       pr_info("EFI v%u.%.02u by %s\n",
-               efi.systab->hdr.revision >> 16,
-               efi.systab->hdr.revision & 0xffff, vendor);
-
-       table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
-       config_tables = early_memremap(efi_to_phys(efi.systab->tables),
-                                      table_size);
-       if (config_tables == NULL) {
-               pr_warn("Unable to map EFI config table array.\n");
-               retval = -ENOMEM;
-               goto out;
-       }
-       retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
-                                        sizeof(efi_config_table_64_t), NULL);
-
-       early_memunmap(config_tables, table_size);
-out:
-       early_memunmap(efi.systab,  sizeof(efi_system_table_t));
-       return retval;
-}
-
-/*
- * Return true for RAM regions we want to permanently reserve.
- */
-static __init int is_reserve_region(efi_memory_desc_t *md)
-{
-       switch (md->type) {
-       case EFI_LOADER_CODE:
-       case EFI_LOADER_DATA:
-       case EFI_BOOT_SERVICES_CODE:
-       case EFI_BOOT_SERVICES_DATA:
-       case EFI_CONVENTIONAL_MEMORY:
-       case EFI_PERSISTENT_MEMORY:
-               return 0;
-       default:
-               break;
-       }
-       return is_normal_ram(md);
-}
-
-static __init void reserve_regions(void)
-{
-       efi_memory_desc_t *md;
-       u64 paddr, npages, size;
-
-       if (efi_enabled(EFI_DBG))
-               pr_info("Processing EFI memory map:\n");
-
-       for_each_efi_memory_desc(&memmap, md) {
-               paddr = md->phys_addr;
-               npages = md->num_pages;
-
-               if (efi_enabled(EFI_DBG)) {
-                       char buf[64];
-
-                       pr_info("  0x%012llx-0x%012llx %s",
-                               paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-                               efi_md_typeattr_format(buf, sizeof(buf), md));
-               }
-
-               memrange_efi_to_native(&paddr, &npages);
-               size = npages << PAGE_SHIFT;
-
-               if (is_normal_ram(md))
-                       early_init_dt_add_memory_arch(paddr, size);
-
-               if (is_reserve_region(md)) {
-                       memblock_reserve(paddr, size);
-                       if (efi_enabled(EFI_DBG))
-                               pr_cont("*");
-               }
-
-               if (efi_enabled(EFI_DBG))
-                       pr_cont("\n");
-       }
-
-       set_bit(EFI_MEMMAP, &efi.flags);
-}
-
-void __init efi_init(void)
-{
-       struct efi_fdt_params params;
-
-       /* Grab UEFI information placed in FDT by stub */
-       if (!efi_get_fdt_params(&params))
-               return;
-
-       efi_system_table = params.system_table;
-
-       memblock_reserve(params.mmap & PAGE_MASK,
-                        PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
-       memmap.phys_map = params.mmap;
-       memmap.map = early_memremap(params.mmap, params.mmap_size);
-       if (memmap.map == NULL) {
-               /*
-               * If we are booting via UEFI, the UEFI memory map is the only
-               * description of memory we have, so there is little point in
-               * proceeding if we cannot access it.
-               */
-               panic("Unable to map EFI memory map.\n");
-       }
-       memmap.map_end = memmap.map + params.mmap_size;
-       memmap.desc_size = params.desc_size;
-       memmap.desc_version = params.desc_ver;
-
-       if (uefi_init() < 0)
-               return;
-
-       reserve_regions();
-       early_memunmap(memmap.map, params.mmap_size);
-}
-
-static bool __init efi_virtmap_init(void)
-{
-       efi_memory_desc_t *md;
-
-       init_new_context(NULL, &efi_mm);
-
-       for_each_efi_memory_desc(&memmap, md) {
-               pgprot_t prot;
-
-               if (!(md->attribute & EFI_MEMORY_RUNTIME))
-                       continue;
-               if (md->virt_addr == 0)
-                       return false;
-
-               pr_info("  EFI remap 0x%016llx => %p\n",
-                       md->phys_addr, (void *)md->virt_addr);
-
-               /*
-                * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-                * executable, everything else can be mapped with the XN bits
-                * set.
-                */
-               if (!is_normal_ram(md))
-                       prot = __pgprot(PROT_DEVICE_nGnRE);
-               else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-                        !PAGE_ALIGNED(md->phys_addr))
-                       prot = PAGE_KERNEL_EXEC;
-               else
-                       prot = PAGE_KERNEL;
-
-               create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
-                                  md->num_pages << EFI_PAGE_SHIFT, 
-                                  __pgprot(pgprot_val(prot) | PTE_NG));
-       }
-       return true;
-}
-
-/*
- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
- * non-early mapping of the UEFI system table and virtual mappings for all
- * EFI_MEMORY_RUNTIME regions.
- */
-static int __init arm64_enable_runtime_services(void)
-{
-       u64 mapsize;
-
-       if (!efi_enabled(EFI_BOOT)) {
-               pr_info("EFI services will not be available.\n");
-               return 0;
-       }
-
-       if (efi_runtime_disabled()) {
-               pr_info("EFI runtime services will be disabled.\n");
-               return 0;
-       }
-
-       pr_info("Remapping and enabling EFI services.\n");
-
-       mapsize = memmap.map_end - memmap.map;
-       memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
-                                                  mapsize);
-       if (!memmap.map) {
-               pr_err("Failed to remap EFI memory map\n");
-               return -ENOMEM;
-       }
-       memmap.map_end = memmap.map + mapsize;
-       efi.memmap = &memmap;
-
-       efi.systab = (__force void *)ioremap_cache(efi_system_table,
-                                                  sizeof(efi_system_table_t));
-       if (!efi.systab) {
-               pr_err("Failed to remap EFI System Table\n");
-               return -ENOMEM;
-       }
-       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
-       if (!efi_virtmap_init()) {
-               pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-               return -ENOMEM;
-       }
-
-       /* Set up runtime services function pointers */
-       efi_native_runtime_setup();
-       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-
-       efi.runtime_version = efi.systab->hdr.revision;
-
+       if ((md->attribute & EFI_MEMORY_WB) == 0)
+               prot_val = PROT_DEVICE_nGnRE;
+       else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+                !PAGE_ALIGNED(md->phys_addr))
+               prot_val = pgprot_val(PAGE_KERNEL_EXEC);
+       else
+               prot_val = pgprot_val(PAGE_KERNEL);
+
+       create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
+                          md->num_pages << EFI_PAGE_SHIFT,
+                          __pgprot(prot_val | PTE_NG));
        return 0;
 }
-early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -337,23 +54,6 @@ static int __init arm64_dmi_init(void)
 }
 core_initcall(arm64_dmi_init);
 
-static void efi_set_pgd(struct mm_struct *mm)
-{
-       switch_mm(NULL, mm, NULL);
-}
-
-void efi_virtmap_load(void)
-{
-       preempt_disable();
-       efi_set_pgd(&efi_mm);
-}
-
-void efi_virtmap_unload(void)
-{
-       efi_set_pgd(current->active_mm);
-       preempt_enable();
-}
-
 /*
  * UpdateCapsule() depends on the system being shutdown via
  * ResetSystem().
index d459133f9e3989509b9e59023b6830903a14ad27..8606895240bab606c806267aa815e43fc66318db 100644 (file)
@@ -29,7 +29,9 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess.h>
 #include <asm/unistd.h>
 
 /*
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+        * EL0, there is no need to check the state of TTBR0_EL1 since
+        * accesses are always enabled.
+        * Note that the meaning of this bit differs from the ARMv8.1 PAN
+        * feature as all TTBR0_EL1 accesses are disabled, not just those to
+        * user mappings.
+        */
+alternative_if_not ARM64_HAS_PAN
+       nop
+alternative_else
+       b       1f                              // skip TTBR0 PAN
+alternative_endif
+
+       .if     \el != 0
+       mrs     x21, ttbr0_el1
+       tst     x21, #0xffff << 48              // Check for the reserved ASID
+       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
+       b.eq    1f                              // TTBR0 access already disabled
+       and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
+       .endif
+
+       uaccess_ttbr0_disable x21
+1:
+#endif
+
        stp     x22, x23, [sp, #S_PC]
 
        /*
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
+       .endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+        * PAN bit checking.
+        */
+alternative_if_not ARM64_HAS_PAN
+       nop
+alternative_else
+       b       2f                              // skip TTBR0 PAN
+alternative_endif
+
+       .if     \el != 0
+       tbnz    x22, #_PSR_PAN_BIT, 1f          // Skip re-enabling TTBR0 access if previously disabled
+       .endif
+
+       uaccess_ttbr0_enable x0
+
+       .if     \el == 0
+       /*
+        * Enable errata workarounds only if returning to user. The only
+        * workaround currently required for TTBR0_EL1 changes is for the
+        * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+        * corruption).
+        */
+       post_ttbr0_update_workaround
+       .endif
+1:
+       .if     \el != 0
+       and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
+       .endif
+2:
+#endif
+
+       .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -165,6 +231,7 @@ alternative_else
 alternative_endif
 #endif
        .endif
+
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
@@ -187,10 +254,6 @@ alternative_endif
        eret                                    // return to kernel
        .endm
 
-       .macro  get_thread_info, rd
-       mrs     \rd, sp_el0
-       .endm
-
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 
@@ -291,7 +354,7 @@ END(vectors)
  * Invalid mode handlers
  */
        .macro  inv_entry, el, reason, regsize = 64
-       kernel_entry el, \regsize
+       kernel_entry \el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
@@ -350,6 +413,8 @@ el1_sync:
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
+       cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
+       b.eq    el1_ia
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
@@ -361,6 +426,11 @@ el1_sync:
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv
+
+el1_ia:
+       /*
+        * Fall through to the Data abort case
+        */
 el1_da:
        /*
         * Data abort handling
@@ -546,7 +616,7 @@ el0_ia:
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, x26
-       orr     x1, x25, #1 << 24               // use reserved ISS bit for instruction aborts
+       mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
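
In the kernel_entry path above, the "tst x21, #0xffff << 48" / "b.eq" pair keys off the ASID field of TTBR0_EL1: the reserved table installed by uaccess_ttbr0_disable carries ASID 0, while values written by update_saved_ttbr0() carry the mm's ASID, which (assuming the allocator never hands ASID 0 to a user mm, as on arm64) is non-zero. A short C restatement of that test, as a sketch rather than kernel code:

#include <stdbool.h>
#include <stdint.h>

/* User access is considered already disabled when TTBR0_EL1 holds the
 * reserved table, the only value whose ASID field (bits 63:48) is zero. */
static bool ttbr0_user_access_disabled(uint64_t ttbr0)
{
	return (ttbr0 & (0xffffULL << 48)) == 0;
}
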
index 029c466eaa4c68cad67da330be9024ccfbbd82b4..8cfd5ab377434b2ccafd5b8d7c4701386faf8a95 100644 (file)
@@ -318,14 +318,14 @@ __create_page_tables:
         * dirty cache lines being evicted.
         */
        mov     x0, x25
-       add     x1, x26, #SWAPPER_DIR_SIZE
+       add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        bl      __inval_cache_range
 
        /*
         * Clear the idmap and swapper page tables.
         */
        mov     x0, x25
-       add     x6, x26, #SWAPPER_DIR_SIZE
+       add     x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:     stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
@@ -404,7 +404,7 @@ __create_page_tables:
         * tables again to remove any speculatively loaded cache lines.
         */
        mov     x0, x25
-       add     x1, x26, #SWAPPER_DIR_SIZE
+       add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        dmb     sy
        bl      __inval_cache_range
 
@@ -695,6 +695,9 @@ ENTRY(__enable_mmu)
        isb
        bl      __create_page_tables            // recreate kernel mapping
 
+       tlbi    vmalle1                         // Remove any stale TLB entries
+       dsb     nsh
+
        msr     sctlr_el1, x19                  // re-enable the MMU
        isb
        ic      iallu                           // flush instructions fetched
index 80624829db613961b7a088ce18d8591361b448c7..6f3fb46170bfdc9a22230e60776a92b629b3a5f2 100644 (file)
@@ -165,6 +165,70 @@ void machine_restart(char *cmd)
        while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+       unsigned int i;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+       show_data(regs->sp - nbytes, nbytes * 2, "SP");
+       for (i = 0; i < 30; i++) {
+               char name[4];
+               snprintf(name, sizeof(name), "X%u", i);
+               show_data(regs->regs[i] - nbytes, nbytes * 2, name);
+       }
+       set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        int i, top_reg;
@@ -191,6 +255,8 @@ void __show_regs(struct pt_regs *regs)
                if (i % 2 == 0)
                        printk("\n");
        }
+       if (!user_mode(regs))
+               show_extra_register_data(regs, 128);
        printk("\n");
 }
 
index 1e33d967c0ae5ce731a1980e1a41e6d7939ce568..0153c0d8ddb18e9cf94a17db4e230617eaad2b7b 100644 (file)
@@ -201,7 +201,7 @@ static void __init request_standard_resources(void)
        struct resource *res;
 
        kernel_code.start   = virt_to_phys(_text);
-       kernel_code.end     = virt_to_phys(_etext - 1);
+       kernel_code.end     = virt_to_phys(__init_begin - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);
 
@@ -346,6 +346,15 @@ void __init setup_arch(char **cmdline_p)
        smp_init_cpus();
        smp_build_mpidr_hash();
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Make sure init_thread_info.ttbr0 always generates translation
+        * faults in case uaccess_enable() is inadvertently called by the init
+        * thread.
+        */
+       init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
+
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
index 694f6deedbab89509c26d9c9d05d8e6911cf0fbd..5b2c67a510d8a5ecbed84300b6118248432a3cc2 100644 (file)
 #include <linux/nodemask.h>
 #include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/sched_energy.h>
 
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
+unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+#ifdef CONFIG_CPU_FREQ
+       unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);
+
+       return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
+#else
+       return per_cpu(cpu_scale, cpu);
+#endif
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+       per_cpu(cpu_scale, cpu) = capacity;
+}
+
 static int __init get_cpu_for_node(struct device_node *node)
 {
        struct device_node *cpu_node;
@@ -206,11 +226,67 @@ out:
 struct cpu_topology cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+/* sd energy functions */
+static inline
+const struct sched_group_energy * const cpu_cluster_energy(int cpu)
+{
+       struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+
+       if (!sge) {
+               pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
+               return NULL;
+       }
+
+       return sge;
+}
+
+static inline
+const struct sched_group_energy * const cpu_core_energy(int cpu)
+{
+       struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
+
+       if (!sge) {
+               pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
+               return NULL;
+       }
+
+       return sge;
+}
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_topology[cpu].core_sibling;
 }
 
+static inline int cpu_corepower_flags(void)
+{
+       return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN | \
+              SD_SHARE_CAP_STATES;
+}
+
+static struct sched_domain_topology_level arm64_topology[] = {
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
+static void update_cpu_capacity(unsigned int cpu)
+{
+       unsigned long capacity = SCHED_CAPACITY_SCALE;
+
+       if (cpu_core_energy(cpu)) {
+               int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+               capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+       }
+
+       set_capacity_scale(cpu, capacity);
+
+       pr_info("CPU%d: update cpu_capacity %lu\n",
+               cpu, arch_scale_cpu_capacity(NULL, cpu));
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -272,6 +348,7 @@ void store_cpu_topology(unsigned int cpuid)
 
 topology_populated:
        update_siblings_masks(cpuid);
+       update_cpu_capacity(cpuid);
 }
 
 static void __init reset_cpu_topology(void)
@@ -302,4 +379,8 @@ void __init init_cpu_topology(void)
         */
        if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
+       else
+               set_sched_topology(arm64_topology);
+
+       init_sched_energy_costs();
 }
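scale_cpu_capacity() above is a fixed-point multiply: the static per-CPU capacity is scaled by the current max-frequency factor and shifted back down by SCHED_CAPACITY_SHIFT. A minimal stand-alone sketch of that arithmetic, with made-up example values (SCHED_CAPACITY_SHIFT is 10 in this kernel, i.e. a scale of 1024):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	/* example: a little core rated at ~60% of the biggest core,
	 * currently limited to half of its maximum frequency */
	unsigned long cpu_scale = 606;				/* e.g. from the energy model's top cap state */
	unsigned long max_freq_scale = SCHED_CAPACITY_SCALE / 2;

	unsigned long capacity = cpu_scale * max_freq_scale >> SCHED_CAPACITY_SHIFT;

	printf("effective capacity: %lu out of %lu\n", capacity, SCHED_CAPACITY_SCALE);	/* 303 out of 1024 */
	return 0;
}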
index c5392081b49ba4ac4f48d9a0782442ac888ed222..29d7b68f63f091a0d4266ed64501ab35a41b2d5c 100644 (file)
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 
        /*
         * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
+        * to safely read from kernel space.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
        print_ip_sym(where);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
        unsigned long addr = instruction_pointer(regs);
-       mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;
 
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;
 
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
                }
        }
        printk("%sCode: %s\n", lvl, str);
+}
 
-       set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+       if (!user_mode(regs)) {
+               mm_segment_t fs = get_fs();
+               set_fs(KERNEL_DS);
+               __dump_instr(lvl, regs);
+               set_fs(fs);
+       } else {
+               __dump_instr(lvl, regs);
+       }
 }
 
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
@@ -465,7 +465,7 @@ static const char *esr_class_str[] = {
 
 const char *esr_get_class_string(u32 esr)
 {
-       return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+       return esr_class_str[ESR_ELx_EC(esr)];
 }
 
 /*
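esr_get_class_string() now uses the ESR_ELx_EC() helper rather than an open-coded shift. The macro lives in asm/esr.h; the sketch below reproduces its effect from memory (treat the exact definitions as an assumption) and shows the 6-bit exception class being pulled out of bits [31:26] of the syndrome:

#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

int main(void)
{
	unsigned long esr = 0x96000045UL;		/* example syndrome: data abort from EL1 */
	printf("EC = %#lx\n", ESR_ELx_EC(esr));		/* prints 0x25 */
	return 0;
}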
index 623532f44323026b4c2dfa54015f2e5714466ae0..7a5228c7abdd7cc9c8226df887a2ec9e7b4d053d 100644 (file)
@@ -133,12 +133,13 @@ SECTIONS
        }
 
        . = ALIGN(SEGMENT_ALIGN);
-       RO_DATA(PAGE_SIZE)              /* everything from this point to */
-       EXCEPTION_TABLE(8)              /* _etext will be marked RO NX   */
+       _etext = .;                     /* End of text section */
+
+       RO_DATA(PAGE_SIZE)              /* everything from this point to     */
+       EXCEPTION_TABLE(8)              /* __init_begin will be marked RO NX */
        NOTES
 
        . = ALIGN(SEGMENT_ALIGN);
-       _etext = .;                     /* End of text and rodata section */
        __init_begin = .;
 
        INIT_TEXT_SECTION(8)
@@ -193,6 +194,11 @@ SECTIONS
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       reserved_ttbr0 = .;
+       . += RESERVED_TTBR0_SIZE;
+#endif
+
        _end = .;
 
        STABS_DEBUG
index 5d1cad3ce6d601aa474ae9c9b8ef4c76a785912e..08b5f18ba604f99461f1c879bbecef8cdb1f53c5 100644 (file)
  */
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
        .text
 
@@ -33,8 +33,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_enable_not_uao x2, x3
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@ -54,8 +53,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
        b.mi    5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_disable_not_uao x2
        ret
 ENDPROC(__clear_user)
 
index 0b90497d4424c59d0a9ce2dcf5642012f452d3c8..6505ec81f1da18ac52427db04821259135c7fbd2 100644 (file)
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
 
 end    .req    x5
 ENTRY(__arch_copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_enable_not_uao x3, x4
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_disable_not_uao x3
        mov     x0, #0                          // Nothing to copy
        ret
 ENDPROC(__arch_copy_from_user)
index f7292dd08c840f27d39874fe7cc08aa89bdfb66d..9b04ff3ab6101d29ef8b6be48453260f9d3ffced 100644 (file)
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
 
 end    .req    x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_enable_not_uao x3, x4
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_disable_not_uao x3
        mov     x0, #0
        ret
 ENDPROC(__copy_in_user)
index 7a7efe25503452bdfe8e4108b5f4aa0ad9495da5..8077e4f34d56b7f5b2b9a1ed9c5b53f23f573547 100644 (file)
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
 
 end    .req    x5
 ENTRY(__arch_copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_enable_not_uao x3, x4
        add     end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-           CONFIG_ARM64_PAN)
+       uaccess_disable_not_uao x3
        mov     x0, #0
        ret
 ENDPROC(__arch_copy_to_user)
index 50ff9ba3a2367283e0340bb082c48f24678a5daa..07d7352d7c3896cb70d1640bb3215d09b3dc1ad6 100644 (file)
@@ -52,7 +52,7 @@ ENTRY(__flush_cache_user_range)
        sub     x3, x2, #1
        bic     x4, x0, x3
 1:
-USER(9f, dc    cvau, x4        )               // clean D line to PoU
+user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    1b
index 7275628ba59f663489f6f9403d46ca8a5050c6f7..25128089c386b72298d913105f67c9ddd3da3892 100644 (file)
@@ -182,7 +182,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-       cpu_switch_mm(mm->pgd, mm);
+       /*
+        * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+        * emulating PAN.
+        */
+       if (!system_uses_ttbr0_pan())
+               cpu_switch_mm(mm->pgd, mm);
 }
 
 static int asids_init(void)
index a6e757cbab7785ed411e919b95c6d13caaf21726..925b2b3a06f872b54529b03c4ff9898184e618f6 100644 (file)
@@ -170,7 +170,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-                                                  prot, NULL);
+                                                  prot, __builtin_return_address(0));
        if (!coherent_ptr)
                goto no_map;
 
index 6c16e4963b39092a30ad444f1862d39d270538ae..7e4423d22d2823e68a4f12f85fc12b297ae2f867 100644 (file)
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+       return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
        /*
         * Are we prepared to handle this kernel fault?
+        * We are almost certainly not prepared to handle instruction faults.
         */
-       if (fixup_exception(regs))
+       if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;
 
        /*
@@ -224,8 +230,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADMAP                0x010000
 #define VM_FAULT_BADACCESS     0x020000
 
-#define ESR_LNX_EXEC           (1 << 24)
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
                           unsigned int mm_flags, unsigned long vm_flags,
                           struct task_struct *tsk)
@@ -264,12 +268,24 @@ out:
        return fault;
 }
 
-static inline int permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
 {
-       unsigned int ec       = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
+       unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-       return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+       if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+               return false;
+
+       if (system_uses_ttbr0_pan())
+               return fsc_type == ESR_ELx_FSC_FAULT &&
+                       (regs->pstate & PSR_PAN_BIT);
+       else
+               return fsc_type == ESR_ELx_FSC_PERM;
+}
+
+static bool is_el0_instruction_abort(unsigned int esr)
+{
+       return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
 }
 
 static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
@@ -301,17 +317,20 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        if (user_mode(regs))
                mm_flags |= FAULT_FLAG_USER;
 
-       if (esr & ESR_LNX_EXEC) {
+       if (is_el0_instruction_abort(esr)) {
                vm_flags = VM_EXEC;
        } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
                vm_flags = VM_WRITE;
                mm_flags |= FAULT_FLAG_WRITE;
        }
 
-       if (permission_fault(esr) && (addr < USER_DS)) {
+       if (addr < USER_DS && is_permission_fault(esr, regs)) {
                if (get_fs() == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+               if (is_el1_instruction_abort(esr))
+                       die("Attempting to execute userspace memory", regs, esr);
+
                if (!search_exception_tables(regs->pc))
                        die("Accessing user space memory outside uaccess.h routines", regs, esr);
        }
@@ -463,7 +482,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
        return 1;
 }
 
-static struct fault_info {
+static const struct fault_info {
        int     (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
        int     sig;
        int     code;
@@ -489,10 +508,10 @@ static struct fault_info {
        { do_bad,               SIGBUS,  0,             "unknown 17"                    },
        { do_bad,               SIGBUS,  0,             "unknown 18"                    },
        { do_bad,               SIGBUS,  0,             "unknown 19"                    },
-       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
-       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
-       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
-       { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous external abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous external abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous external abort (translation table walk)" },
+       { do_bad,               SIGBUS,  0,             "synchronous external abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error"      },
        { do_bad,               SIGBUS,  0,             "unknown 25"                    },
        { do_bad,               SIGBUS,  0,             "unknown 26"                    },
index 75728047b60b33f29dea212e58b309db8424e119..6788780779963d078c3ec611b1f870d500b1737f 100644 (file)
@@ -127,9 +127,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 }
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
 int pfn_valid(unsigned long pfn)
 {
-       return memblock_is_memory(pfn << PAGE_SHIFT);
+       return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
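The PFN_MASK test added to pfn_valid() rejects page frame numbers whose high bits would be lost by the pfn << PAGE_SHIFT conversion before memblock is consulted. A stand-alone sketch of the check, assuming 4 KiB pages (PAGE_SHIFT = 12) on a 64-bit build:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_MASK	((1UL << (64 - PAGE_SHIFT)) - 1)

int main(void)
{
	unsigned long good  = 0x80000UL;	/* PFN of the 2 GiB boundary */
	unsigned long bogus = 1UL << 60;	/* top bits set: cannot be a real PFN */

	printf("good:  passes PFN_MASK check? %d\n", (good & PFN_MASK) == good);	/* 1 */
	printf("bogus: passes PFN_MASK check? %d\n", (bogus & PFN_MASK) == bogus);	/* 0 */
	return 0;
}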
@@ -190,8 +192,12 @@ void __init arm64_memblock_init(void)
         */
        memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
                        ULLONG_MAX);
-       if (memblock_end_of_DRAM() > linear_region_size)
-               memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+       if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
+               /* ensure that memstart_addr remains sufficiently aligned */
+               memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
+                                        ARM64_MEMSTART_ALIGN);
+               memblock_remove(0, memstart_addr);
+       }
 
        /*
         * Apply the memory limit if it was set. Since the kernel may be loaded
@@ -385,8 +391,8 @@ void __init mem_init(void)
                  MLM(MODULES_VADDR, MODULES_END),
                  MLG(VMALLOC_START, VMALLOC_END),
                  MLK_ROUNDUP(__init_begin, __init_end),
-                 MLK_ROUNDUP(_text, __start_rodata),
-                 MLK_ROUNDUP(__start_rodata, _etext),
+                 MLK_ROUNDUP(_text, _etext),
+                 MLK_ROUNDUP(__start_rodata, __init_begin),
                  MLK_ROUNDUP(_sdata, _edata),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLG(VMEMMAP_START,
index ed177475dd8ce6abbee845f40e1cdbe3ebe72adb..232f787a088ae8e992c52cba450159a43a900174 100644 (file)
@@ -51,8 +51,12 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       rnd = (unsigned long)get_random_int() & STACK_RND_MASK;
-
+#ifdef CONFIG_COMPAT
+       if (test_thread_flag(TIF_32BIT))
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+       else
+#endif
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
        return rnd << PAGE_SHIFT;
 }
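With this change the amount of mmap base randomization is driven by mmap_rnd_bits (or mmap_rnd_compat_bits for 32-bit tasks) rather than the fixed STACK_RND_MASK: rnd_bits random bits are kept and shifted up by PAGE_SHIFT. A rough user-space illustration, using an assumed value of 18 bits and 4 KiB pages (so up to roughly 1 GiB of offset); the kernel itself draws the bits from get_random_long():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned int mmap_rnd_bits = 18;	/* assumed example; the real value is clamped by the arch Kconfig min/max */
	unsigned long raw, rnd;

	srand((unsigned int)time(NULL));
	raw = ((unsigned long)rand() << 31) ^ (unsigned long)rand();	/* stand-in for get_random_long() */
	rnd = raw & ((1UL << mmap_rnd_bits) - 1);

	printf("mmap base offset: %#lx (maximum %#lx)\n",
	       rnd << PAGE_SHIFT, ((1UL << mmap_rnd_bits) - 1) << PAGE_SHIFT);
	return 0;
}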
 
index 8fc302d84e1f524aa0496fd213524a9d30602ebc..1cab2703f5a87e7074dbdbe8c932ff2317cf4bfe 100644 (file)
@@ -386,14 +386,14 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
        unsigned long kernel_start = __pa(_text);
-       unsigned long kernel_end = __pa(_etext);
+       unsigned long kernel_end = __pa(__init_begin);
 
        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         */
 
-       /* No overlap with the kernel text */
+       /* No overlap with the kernel text/rodata */
        if (end < kernel_start || start >= kernel_end) {
                __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                     end - start, PAGE_KERNEL,
@@ -402,7 +402,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
        }
 
        /*
-        * This block overlaps the kernel text mapping.
+        * This block overlaps the kernel text/rodata mappings.
         * Map the portion(s) which don't overlap.
         */
        if (start < kernel_start)
@@ -417,7 +417,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
                                     early_pgtable_alloc);
 
        /*
-        * Map the linear alias of the [_text, _etext) interval as
+        * Map the linear alias of the [_text, __init_begin) interval as
         * read-only/non-executable. This makes the contents of the
         * region accessible to subsystems such as hibernate, but
         * protects it from inadvertent modification or execution.
@@ -438,6 +438,8 @@ static void __init map_mem(pgd_t *pgd)
 
                if (start >= end)
                        break;
+               if (memblock_is_nomap(reg))
+                       continue;
 
                __map_memblock(pgd, start, end);
        }
@@ -447,14 +449,14 @@ void mark_rodata_ro(void)
 {
        unsigned long section_size;
 
-       section_size = (unsigned long)__start_rodata - (unsigned long)_text;
+       section_size = (unsigned long)_etext - (unsigned long)_text;
        create_mapping_late(__pa(_text), (unsigned long)_text,
                            section_size, PAGE_KERNEL_ROX);
        /*
-        * mark .rodata as read only. Use _etext rather than __end_rodata to
-        * cover NOTES and EXCEPTION_TABLE.
+        * mark .rodata as read only. Use __init_begin rather than __end_rodata
+        * to cover NOTES and EXCEPTION_TABLE.
         */
-       section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
+       section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
        create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);
 }
@@ -497,8 +499,8 @@ static void __init map_kernel(pgd_t *pgd)
 {
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
 
-       map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
-       map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
+       map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+       map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
        map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
                           &vmlinux_init);
        map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
deleted file mode 100644 (file)
index 984edcd..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Based on arch/arm/mm/proc-macros.S
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-
-/*
- * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
- */
-       .macro  vma_vm_mm, rd, rn
-       ldr     \rd, [\rn, #VMA_VM_MM]
-       .endm
-
-/*
- * mmid - get context id from mm pointer (mm->context.id)
- */
-       .macro  mmid, rd, rn
-       ldr     \rd, [\rn, #MM_CONTEXT_ID]
-       .endm
-
-/*
- * dcache_line_size - get the minimum D-cache line size from the CTR register.
- */
-       .macro  dcache_line_size, reg, tmp
-       mrs     \tmp, ctr_el0                   // read CTR
-       ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
-       mov     \reg, #4                        // bytes per word
-       lsl     \reg, \reg, \tmp                // actual cache line size
-       .endm
-
-/*
- * icache_line_size - get the minimum I-cache line size from the CTR register.
- */
-       .macro  icache_line_size, reg, tmp
-       mrs     \tmp, ctr_el0                   // read CTR
-       and     \tmp, \tmp, #0xf                // cache line size encoding
-       mov     \reg, #4                        // bytes per word
-       lsl     \reg, \reg, \tmp                // actual cache line size
-       .endm
-
-/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
- */
-       .macro  tcr_set_idmap_t0sz, valreg, tmpreg
-#ifndef CONFIG_ARM64_VA_BITS_48
-       ldr_l   \tmpreg, idmap_t0sz
-       bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
-#endif
-       .endm
-
-/*
- * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
- */
-       .macro  reset_pmuserenr_el0, tmpreg
-       mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
-       sbfx    \tmpreg, \tmpreg, #8, #4
-       cmp     \tmpreg, #1                     // Skip if no PMU present
-       b.lt    9000f
-       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
-9000:
-       .endm
-
-/*
- * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
- *
- *     op:             operation passed to dc instruction
- *     domain:         domain used in dsb instruction
- *     kaddr:          starting virtual address of the region
- *     size:           size of the region
- *     Corrupts:       kaddr, size, tmp1, tmp2
- */
-       .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
-       dcache_line_size \tmp1, \tmp2
-       add     \size, \kaddr, \size
-       sub     \tmp2, \tmp1, #1
-       bic     \kaddr, \kaddr, \tmp2
-9998:  dc      \op, \kaddr
-       add     \kaddr, \kaddr, \tmp1
-       cmp     \kaddr, \size
-       b.lo    9998b
-       dsb     \domain
-       .endm
index 5bb61de2320172c806ee58959e3f721b2b243a99..8292784d44c95508c50be40b201454fafc488d2e 100644 (file)
@@ -125,17 +125,8 @@ ENTRY(cpu_do_switch_mm)
        bfi     x0, x1, #48, #16                // set the ASID
        msr     ttbr0_el1, x0                   // set TTBR0
        isb
-alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
+       post_ttbr0_update_workaround
        ret
-       nop
-       nop
-       nop
-alternative_else
-       ic      iallu
-       dsb     nsh
-       isb
-       ret
-alternative_endif
 ENDPROC(cpu_do_switch_mm)
 
        .pushsection ".idmap.text", "ax"
index 8bbe9401f4f011d3239adcd2d6f5251d5fe37ff0..6d6e4af1a4bfb2e6354a242c0638a813d3f173d0 100644 (file)
@@ -49,6 +49,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/uaccess.h>
 #include <xen/interface/xen.h>
 
 
@@ -89,6 +90,24 @@ ENTRY(privcmd_call)
        mov x2, x3
        mov x3, x4
        mov x4, x5
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Privcmd calls are issued by userspace. The kernel needs to
+        * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+        * translations to user memory via AT instructions. Since AT
+        * instructions are not affected by the PAN bit (ARMv8.1), we only
+        * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+        * is enabled (it implies that hardware UAO and PAN are disabled).
+        */
+       uaccess_enable_not_uao x6, x7
+#endif
        hvc XEN_IMM
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Disable userspace access from kernel once the hyp call completed.
+        */
+       uaccess_disable_not_uao x6
+#endif
        ret
 ENDPROC(privcmd_call);
diff --git a/arch/ia64/include/asm/early_ioremap.h b/arch/ia64/include/asm/early_ioremap.h
new file mode 100644 (file)
index 0000000..eec9e1d
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _ASM_IA64_EARLY_IOREMAP_H
+#define _ASM_IA64_EARLY_IOREMAP_H
+
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+#define early_memremap(phys_addr, size)        early_ioremap(phys_addr, size)
+
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+#define early_memunmap(addr, size)             early_iounmap(addr, size)
+
+#endif
index 8fdb9c7eeb6641c7c538116f48bd69fec5ce2930..5de673ac9cb13602dc6e8cc31d9a78b38448bbf5 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <asm/unaligned.h>
+#include <asm/early_ioremap.h>
 
 /* We don't use IO slowdowns on the ia64, but.. */
 #define __SLOW_DOWN_IO do { } while (0)
@@ -427,10 +428,6 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
-extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
-#define early_memremap(phys_addr, size)        early_ioremap(phys_addr, size)
-extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
-#define early_memunmap(addr, size)             early_iounmap(addr, size)
 static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
 {
        return ioremap(phys_addr, size);
index 5c81fdd032c3b1269549f27e27348e9606eb5424..353037699512ca5515b11ce8fb2c808eb6386c78 100644 (file)
@@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       rnd = (unsigned long)get_random_int();
+       rnd = get_random_long();
        rnd <<= PAGE_SHIFT;
        if (TASK_IS_32BIT_ADDR)
                rnd &= 0xfffffful;
@@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 static inline unsigned long brk_rnd(void)
 {
-       unsigned long rnd = get_random_int();
+       unsigned long rnd = get_random_long();
 
        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
index cf788d7d7e56fdf59abc796874ed3886eb821217..b7abf3cd2a67e1ef1d7f29809bac0c844aaae12c 100644 (file)
@@ -1651,9 +1651,9 @@ static inline unsigned long brk_rnd(void)
 
        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
-               rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+               rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
        else
-               rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+               rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
 
        return rnd << PAGE_SHIFT;
 }
index 0f0502e12f6c4c8accbe7fc28eb4db08158decfb..4087705ba90f34241200e2f30765794ea6b74b55 100644 (file)
@@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void)
 
        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
-               rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+               rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
        else
-               rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
+               rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
 
        return rnd << PAGE_SHIFT;
 }
index c690c8e16a96ef2758fca4e9af8080ec7af6c17a..b489e9759518182b6a3884935e5a1c22b1af3524 100644 (file)
@@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void)
        unsigned long rnd = 0UL;
 
        if (current->flags & PF_RANDOMIZE) {
-               unsigned long val = get_random_int();
+               unsigned long val = get_random_long();
                if (test_thread_flag(TIF_32BIT))
                        rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
                else
index 924bbffc56f090081e451ffd37ae23552e597076..68143221db3023ee14e3a7bc1dde4e8d6812a634 100644 (file)
@@ -46,6 +46,8 @@ config X86
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
+       select HAVE_ARCH_MMAP_RND_BITS          if MMU
+       select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if MMU && COMPAT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select BUILDTIME_EXTABLE_SORT
        select CLKEVT_I8253
@@ -184,6 +186,20 @@ config HAVE_LATENCYTOP_SUPPORT
 config MMU
        def_bool y
 
+config ARCH_MMAP_RND_BITS_MIN
+       default 28 if 64BIT
+       default 8
+
+config ARCH_MMAP_RND_BITS_MAX
+       default 32 if 64BIT
+       default 16
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+       default 8
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+       default 16
+
 config SBUS
        bool
 
index 4086abca0b32345c92207fa75468cc98ebe97da4..53949c8863414c2dc8d77219b2b6c0c6d82d4fd9 100644 (file)
@@ -97,6 +97,8 @@ else
         KBUILD_CFLAGS += $(call cc-option,-mno-80387)
         KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
+        KBUILD_CFLAGS += -fno-pic
+
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
 
diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig
new file mode 100644 (file)
index 0000000..0206eb8
--- /dev/null
@@ -0,0 +1,423 @@
+# CONFIG_64BIT is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SMP=y
+CONFIG_X86_BIGSMP=y
+CONFIG_MCORE2=y
+CONFIG_X86_GENERIC=y
+CONFIG_HPET_TIMER=y
+CONFIG_NR_CPUS=512
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_KSM=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=16
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_EFI=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH_SYNC=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=2048
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_PKCS7_MESSAGE_PARSER=y
+CONFIG_PKCS7_TEST_KEY=y
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig
new file mode 100644 (file)
index 0000000..dd38977
--- /dev/null
@@ -0,0 +1,418 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ARCH_MMAP_RND_BITS=32
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SMP=y
+CONFIG_MCORE2=y
+CONFIG_MAXSMP=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_KSM=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_USB_USBNET=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_EFI=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_ION=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH_SYNC=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_PKCS7_MESSAGE_PARSER=y
+CONFIG_PKCS7_TEST_KEY=y
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
index c5d1785373ed38c25f1096467d103cd6440c1b66..02bab09707f28cc7dd2a10bc1a3b9af35f8e1aef 100644 (file)
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
index f0bb7c1f7d199413abc9280a19150912096d6aa9..7402eb4b509d3e601df3a254557fc134eaa1cc43 100644 (file)
@@ -706,7 +706,7 @@ __copy_from_user_overflow(int size, unsigned long count)
 
 #endif
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        int sz = __compiletime_object_size(to);
@@ -742,7 +742,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
        return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        int sz = __compiletime_object_size(from);
index 9f7c21c22477e59462d72e930d79a4c2a238a051..57eca132962f56531d3ca72b082f71be519206a1 100644 (file)
@@ -62,19 +62,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 /*
@@ -251,14 +238,14 @@ static inline void play_dead(void)
 void enter_idle(void)
 {
        this_cpu_write(is_idle, 1);
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+       idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+       idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
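enter_idle()/__exit_idle() now feed the generic idle notifier chain instead of an x86-private one. A hypothetical consumer would register against that chain roughly as below; the idle_notifier_register()/IDLE_START/IDLE_END declarations are assumed to come from the generic header this series introduces (linux/cpu.h in the Android tree), not from asm/idle.h any more:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/cpu.h>		/* assumed home of the generic idle notifier API */

static int my_idle_notify(struct notifier_block *nb, unsigned long action, void *data)
{
	if (action == IDLE_START)
		pr_debug("cpu entering idle\n");
	else if (action == IDLE_END)
		pr_debug("cpu leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

static int __init my_idle_init(void)
{
	idle_notifier_register(&my_idle_nb);
	return 0;
}

static void __exit my_idle_exit(void)
{
	idle_notifier_unregister(&my_idle_nb);
}

module_init(my_idle_init);
module_exit(my_idle_exit);
MODULE_LICENSE("GPL");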
index 307f60ecfc6de33b062cfe75914895e186ca9e67..d2dc0438d654a8bbcfc71583cea8f49f693afcff 100644 (file)
@@ -69,14 +69,14 @@ unsigned long arch_mmap_rnd(void)
 {
        unsigned long rnd;
 
-       /*
-        *  8 bits of randomness in 32bit mmaps, 20 address space bits
-        * 28 bits of randomness in 64bit mmaps, 40 address space bits
-        */
        if (mmap_is_ia32())
-               rnd = (unsigned long)get_random_int() % (1<<8);
+#ifdef CONFIG_COMPAT
+               rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+#else
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+#endif
        else
-               rnd = (unsigned long)get_random_int() % (1<<28);
+               rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 
        return rnd << PAGE_SHIFT;
 }
index 4fab5d6108056ced2547474a927118db4ce05fb7..25f25271b42a50b00b8d02c969b038d0b4bf5573 100644 (file)
@@ -40,6 +40,8 @@
 #include "blk.h"
 #include "blk-mq.h"
 
+#include <linux/math64.h>
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -3541,3 +3543,85 @@ int __init blk_dev_init(void)
 
        return 0;
 }
+
+/*
+ * Blk IO latency support. We want this to be as cheap as possible, so we do
+ * it locklessly (and avoid atomics); being off by a few counts here is not
+ * harmful, and we don't want to do anything that hurts performance.
+ * TODO : If necessary, we can make the histograms per-cpu and aggregate
+ * them when printing them out.
+ */
+void
+blk_zero_latency_hist(struct io_latency_state *s)
+{
+       memset(s->latency_y_axis_read, 0,
+              sizeof(s->latency_y_axis_read));
+       memset(s->latency_y_axis_write, 0,
+              sizeof(s->latency_y_axis_write));
+       s->latency_reads_elems = 0;
+       s->latency_writes_elems = 0;
+}
+EXPORT_SYMBOL(blk_zero_latency_hist);
+
+ssize_t
+blk_latency_hist_show(struct io_latency_state *s, char *buf)
+{
+       int i;
+       int bytes_written = 0;
+       u_int64_t num_elem, elem;
+       int pct;
+
+       num_elem = s->latency_reads_elems;
+       if (num_elem > 0) {
+               bytes_written += scnprintf(buf + bytes_written,
+                          PAGE_SIZE - bytes_written,
+                          "IO svc_time Read Latency Histogram (n = %llu):\n",
+                          num_elem);
+               for (i = 0;
+                    i < ARRAY_SIZE(latency_x_axis_us);
+                    i++) {
+                       elem = s->latency_y_axis_read[i];
+                       pct = div64_u64(elem * 100, num_elem);
+                       bytes_written += scnprintf(buf + bytes_written,
+                                                  PAGE_SIZE - bytes_written,
+                                                  "\t< %5lluus%15llu%15d%%\n",
+                                                  latency_x_axis_us[i],
+                                                  elem, pct);
+               }
+               /* Last element in y-axis table is overflow */
+               elem = s->latency_y_axis_read[i];
+               pct = div64_u64(elem * 100, num_elem);
+               bytes_written += scnprintf(buf + bytes_written,
+                                          PAGE_SIZE - bytes_written,
+                                          "\t> %5dms%15llu%15d%%\n", 10,
+                                          elem, pct);
+       }
+       num_elem = s->latency_writes_elems;
+       if (num_elem > 0) {
+               bytes_written += scnprintf(buf + bytes_written,
+                          PAGE_SIZE - bytes_written,
+                          "IO svc_time Write Latency Histogram (n = %llu):\n",
+                          num_elem);
+               for (i = 0;
+                    i < ARRAY_SIZE(latency_x_axis_us);
+                    i++) {
+                       elem = s->latency_y_axis_write[i];
+                       pct = div64_u64(elem * 100, num_elem);
+                       bytes_written += scnprintf(buf + bytes_written,
+                                                  PAGE_SIZE - bytes_written,
+                                                  "\t< %5lluus%15llu%15d%%\n",
+                                                  latency_x_axis_us[i],
+                                                  elem, pct);
+               }
+               /* Last element in y-axis table is overflow */
+               elem = s->latency_y_axis_write[i];
+               pct = div64_u64(elem * 100, num_elem);
+               bytes_written += scnprintf(buf + bytes_written,
+                                          PAGE_SIZE - bytes_written,
+                                          "\t> %5dms%15llu%15d%%\n", 10,
+                                          elem, pct);
+       }
+       return bytes_written;
+}
+EXPORT_SYMBOL(blk_latency_hist_show);
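Editor's note: a hedged sketch (not part of the patch) of how a block driver embedding a struct io_latency_state could expose the histogram through a sysfs attribute using the two helpers above. "struct demo_dev" and the attribute wiring are hypothetical; the helper signatures match the hunk above, and the struct declaration is assumed to come with this series (e.g. via <linux/blkdev.h>).

#include <linux/blkdev.h>
#include <linux/device.h>

struct demo_dev {
        struct io_latency_state latency_stats;
        /* ... driver-private state ... */
};

static ssize_t latency_hist_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct demo_dev *dd = dev_get_drvdata(dev);

        return blk_latency_hist_show(&dd->latency_stats, buf);
}

static ssize_t latency_hist_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct demo_dev *dd = dev_get_drvdata(dev);

        /* Any write clears the counters in this sketch. */
        blk_zero_latency_hist(&dd->latency_stats);
        return count;
}
static DEVICE_ATTR_RW(latency_hist);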
index a5bed6bc869d66d7630a3d5a9f02a5c1adc54ede..fad9db981675c8a0f753127eb5dfa0f40dc881a4 100644 (file)
@@ -1118,6 +1118,22 @@ static void disk_release(struct device *dev)
                blk_put_queue(disk->queue);
        kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       struct disk_part_iter piter;
+       struct hd_struct *part;
+       int cnt = 0;
+
+       disk_part_iter_init(&piter, disk, 0);
+       while ((part = disk_part_iter_next(&piter)))
+               cnt++;
+       disk_part_iter_exit(&piter);
+       add_uevent_var(env, "NPARTS=%u", cnt);
+       return 0;
+}
+
 struct class block_class = {
        .name           = "block",
 };
@@ -1137,6 +1153,7 @@ static struct device_type disk_type = {
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
+       .uevent         = disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
index a241e3900bc91d330002a39afe7febf3819c0650..91327dbfbb1d5f61002b6028914fc2e4bb81d1da 100644 (file)
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
        kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct hd_struct *part = dev_to_part(dev);
+
+       add_uevent_var(env, "PARTN=%u", part->partno);
+       if (part->info && part->info->volname[0])
+               add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+       return 0;
+}
+
 struct device_type part_type = {
        .name           = "partition",
        .groups         = part_attr_groups,
        .release        = part_release,
+       .uevent         = part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
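Editor's note: the disk_uevent()/part_uevent() callbacks above add NPARTS to disk uevents and PARTN/PARTNAME to partition uevents; the same keys also become visible through the device's sysfs "uevent" file. A hedged user-space sketch that just prints them (the device path is only an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/sys/class/block/sda1/uevent", "r");
        char line[256];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* Keys added by the uevent callbacks in this patch. */
                if (!strncmp(line, "NPARTS=", 7) ||
                    !strncmp(line, "PARTN=", 6) ||
                    !strncmp(line, "PARTNAME=", 9))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}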
index bdfc6c6f4f5a7c8d9363055be25954c66eb55d3f..a82fc022d34ba88ba7adeb35b6b4ee0fafdbdb72 100644 (file)
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
          Android process, using Binder to identify, invoke and pass arguments
          between said processes.
 
+config ANDROID_BINDER_DEVICES
+       string "Android Binder devices"
+       depends on ANDROID_BINDER_IPC
+       default "binder"
+       ---help---
+         Default value for the binder.devices parameter.
+
+         The binder.devices parameter is a comma-separated list of strings
+         that specifies the names of the binder device nodes that will be
+         created. Each binder device has its own context manager, and is
+         therefore logically separated from the other devices.
+
 config ANDROID_BINDER_IPC_32BIT
        bool
        depends on !64BIT && ANDROID_BINDER_IPC
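Editor's note: CONFIG_ANDROID_BINDER_DEVICES (or the binder.devices module parameter) names the device nodes to create; a value such as "binder,hwbinder,vndbinder" would, for illustration, create three independent nodes, each with its own context manager. A hedged user-space sketch that opens one of them and checks the protocol version; the device path and error handling are illustrative only, and BINDER_VERSION is the existing uapi ioctl:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
        struct binder_version vers;
        int fd = open("/dev/hwbinder", O_RDWR | O_CLOEXEC);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BINDER_VERSION, &vers) < 0) {
                perror("BINDER_VERSION");
                close(fd);
                return 1;
        }
        printf("binder protocol version: %d\n", vers.protocol_version);
        close(fd);
        return 0;
}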
index 47ddfefe24431b1274cbac8f1eb1318a8d6e7a08..a4a4268ff53efb2205ff1ea65505664212c9c6f2 100644 (file)
@@ -50,14 +50,13 @@ static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
 static DEFINE_MUTEX(binder_mmap_lock);
 
+static HLIST_HEAD(binder_devices);
 static HLIST_HEAD(binder_procs);
 static HLIST_HEAD(binder_deferred_list);
 static HLIST_HEAD(binder_dead_nodes);
 
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
 static int binder_last_id;
 static struct workqueue_struct *binder_deferred_workqueue;
 
@@ -116,6 +115,9 @@ module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 static bool binder_debug_no_lock;
 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
 
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, S_IRUGO);
+
 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
 static int binder_stop_on_user_error;
 
@@ -146,6 +148,17 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                        binder_stop_on_user_error = 2; \
        } while (0)
 
+#define to_flat_binder_object(hdr) \
+       container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+       container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+       container_of(hdr, struct binder_fd_array_object, hdr)
+
 enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
@@ -159,7 +172,7 @@ enum binder_stat_types {
 
 struct binder_stats {
        int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-       int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+       int bc[_IOC_NR(BC_REPLY_SG) + 1];
        int obj_created[BINDER_STAT_COUNT];
        int obj_deleted[BINDER_STAT_COUNT];
 };
@@ -187,6 +200,7 @@ struct binder_transaction_log_entry {
        int to_node;
        int data_size;
        int offsets_size;
+       const char *context_name;
 };
 struct binder_transaction_log {
        int next;
@@ -211,6 +225,18 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
        return e;
 }
 
+struct binder_context {
+       struct binder_node *binder_context_mgr_node;
+       kuid_t binder_context_mgr_uid;
+       const char *name;
+};
+
+struct binder_device {
+       struct hlist_node hlist;
+       struct miscdevice miscdev;
+       struct binder_context context;
+};
+
 struct binder_work {
        struct list_head entry;
        enum {
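Editor's note: each binder_device above embeds its own binder_context, so the context-manager node and uid are tracked per device rather than globally. A hedged user-space sketch of a service-manager-like process claiming that role on one device; the path is illustrative, and BINDER_SET_CONTEXT_MGR is the existing uapi ioctl:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
        int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Become handle 0 (the context manager) for this device only. */
        if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
                perror("BINDER_SET_CONTEXT_MGR");
                close(fd);
                return 1;
        }
        /* ... would now service lookups via BINDER_WRITE_READ ... */
        close(fd);
        return 0;
}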
@@ -283,6 +309,7 @@ struct binder_buffer {
        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
+       size_t extra_buffers_size;
        uint8_t data[0];
 };
 
@@ -326,6 +353,7 @@ struct binder_proc {
        int ready_threads;
        long default_priority;
        struct dentry *debugfs_entry;
+       struct binder_context *context;
 };
 
 enum {
@@ -649,7 +677,9 @@ err_no_vma:
 
 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                                              size_t data_size,
-                                             size_t offsets_size, int is_async)
+                                             size_t offsets_size,
+                                             size_t extra_buffers_size,
+                                             int is_async)
 {
        struct rb_node *n = proc->free_buffers.rb_node;
        struct binder_buffer *buffer;
@@ -657,7 +687,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
-       size_t size;
+       size_t size, data_offsets_size;
 
        if (proc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
@@ -665,15 +695,20 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                return NULL;
        }
 
-       size = ALIGN(data_size, sizeof(void *)) +
+       data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));
 
-       if (size < data_size || size < offsets_size) {
+       if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
                                proc->pid, data_size, offsets_size);
                return NULL;
        }
-
+       size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+       if (size < data_offsets_size || size < extra_buffers_size) {
+               binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
+                                 proc->pid, extra_buffers_size);
+               return NULL;
+       }
        if (is_async &&
            proc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -742,6 +777,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
                      proc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
+       buffer->extra_buffers_size = extra_buffers_size;
        buffer->async_transaction = is_async;
        if (is_async) {
                proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -816,7 +852,8 @@ static void binder_free_buf(struct binder_proc *proc,
        buffer_size = binder_buffer_size(proc, buffer);
 
        size = ALIGN(buffer->data_size, sizeof(void *)) +
-               ALIGN(buffer->offsets_size, sizeof(void *));
+               ALIGN(buffer->offsets_size, sizeof(void *)) +
+               ALIGN(buffer->extra_buffers_size, sizeof(void *));
 
        binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -930,8 +967,10 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
-                           !(node == binder_context_mgr_node &&
-                           node->has_strong_ref)) {
+                           !(node->proc &&
+                             node == node->proc->context->
+                                     binder_context_mgr_node &&
+                             node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
@@ -1032,6 +1071,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref, *new_ref;
+       struct binder_context *context = proc->context;
 
        while (*p) {
                parent = *p;
@@ -1054,7 +1094,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
 
-       new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+       new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->desc > new_ref->desc)
@@ -1241,11 +1281,158 @@ static void binder_send_failed_reply(struct binder_transaction *t,
        }
 }
 
+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer:    binder_buffer that we're parsing.
+ * @offset:    offset in the buffer at which to validate an object.
+ *
+ * Return:     If there's a valid metadata object at @offset in @buffer, the
+ *             size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+       /* Check if we can read a header first */
+       struct binder_object_header *hdr;
+       size_t object_size = 0;
+
+       if (offset > buffer->data_size - sizeof(*hdr) ||
+           buffer->data_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
+               return 0;
+
+       /* Ok, now see if we can read a complete object. */
+       hdr = (struct binder_object_header *)(buffer->data + offset);
+       switch (hdr->type) {
+       case BINDER_TYPE_BINDER:
+       case BINDER_TYPE_WEAK_BINDER:
+       case BINDER_TYPE_HANDLE:
+       case BINDER_TYPE_WEAK_HANDLE:
+               object_size = sizeof(struct flat_binder_object);
+               break;
+       case BINDER_TYPE_FD:
+               object_size = sizeof(struct binder_fd_object);
+               break;
+       case BINDER_TYPE_PTR:
+               object_size = sizeof(struct binder_buffer_object);
+               break;
+       case BINDER_TYPE_FDA:
+               object_size = sizeof(struct binder_fd_array_object);
+               break;
+       default:
+               return 0;
+       }
+       if (offset <= buffer->data_size - object_size &&
+           buffer->data_size >= object_size)
+               return object_size;
+       else
+               return 0;
+}
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b:         binder_buffer containing the object
+ * @index:     index in offset array at which the binder_buffer_object is
+ *             located
+ * @start:     points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return:     If @index is within the valid range of the offset array
+ *             described by @start and @num_valid, and if there's a valid
+ *             binder_buffer_object at the offset found in index @index
+ *             of the offset array, that object is returned. Otherwise,
+ *             %NULL is returned.
+ *             Note that the offset found in index @index itself is not
+ *             verified; this function assumes that @num_valid elements
+ *             from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+                                                       binder_size_t index,
+                                                       binder_size_t *start,
+                                                       binder_size_t num_valid)
+{
+       struct binder_buffer_object *buffer_obj;
+       binder_size_t *offp;
+
+       if (index >= num_valid)
+               return NULL;
+
+       offp = start + index;
+       buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+       if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+               return NULL;
+
+       return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b:                 transaction buffer
+ * @objects_start      start of objects buffer
+ * @buffer:            binder_buffer_object in which to fix up
+ * @offset:            start offset in @buffer to fix up
+ * @last_obj:          last binder_buffer_object that we fixed up in
+ * @last_min_offset:   minimum fixup offset in @last_obj
+ *
+ * Return:             %true if a fixup in buffer @buffer at offset @offset is
+ *                     allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ *   B (parent = A, offset = 0)
+ *   C (parent = A, offset = 16)
+ *     D (parent = C, offset = 0)
+ *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ *   C (parent = A, offset = 16)
+ *   B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ *   B (parent = A, offset = 0)
+ *   C (parent = A, offset = 0)
+ *   C (parent = A, offset = 16)
+ *     D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+                                 binder_size_t *objects_start,
+                                 struct binder_buffer_object *buffer,
+                                 binder_size_t fixup_offset,
+                                 struct binder_buffer_object *last_obj,
+                                 binder_size_t last_min_offset)
+{
+       if (!last_obj) {
+               /* No previous object, so nothing to fix up against */
+               return false;
+       }
+
+       while (last_obj != buffer) {
+               /*
+                * Safe to retrieve the parent of last_obj, since it
+                * was already previously verified by the driver.
+                */
+               if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+                       return false;
+               last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+               last_obj = (struct binder_buffer_object *)
+                       (b->data + *(objects_start + last_obj->parent));
+       }
+       return (fixup_offset >= last_min_offset);
+}
+
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                              struct binder_buffer *buffer,
                                              binder_size_t *failed_at)
 {
-       binder_size_t *offp, *off_end;
+       binder_size_t *offp, *off_start, *off_end;
        int debug_id = buffer->debug_id;
 
        binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1256,28 +1443,30 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
        if (buffer->target_node)
                binder_dec_node(buffer->target_node, 1, 0);
 
-       offp = (binder_size_t *)(buffer->data +
-                                ALIGN(buffer->data_size, sizeof(void *)));
+       off_start = (binder_size_t *)(buffer->data +
+                                     ALIGN(buffer->data_size, sizeof(void *)));
        if (failed_at)
                off_end = failed_at;
        else
-               off_end = (void *)offp + buffer->offsets_size;
-       for (; offp < off_end; offp++) {
-               struct flat_binder_object *fp;
+               off_end = (void *)off_start + buffer->offsets_size;
+       for (offp = off_start; offp < off_end; offp++) {
+               struct binder_object_header *hdr;
+               size_t object_size = binder_validate_object(buffer, *offp);
 
-               if (*offp > buffer->data_size - sizeof(*fp) ||
-                   buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(u32))) {
-                       pr_err("transaction release %d bad offset %lld, size %zd\n",
+               if (object_size == 0) {
+                       pr_err("transaction release %d bad object at offset %lld, size %zd\n",
                               debug_id, (u64)*offp, buffer->data_size);
                        continue;
                }
-               fp = (struct flat_binder_object *)(buffer->data + *offp);
-               switch (fp->type) {
+               hdr = (struct binder_object_header *)(buffer->data + *offp);
+               switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
-                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       struct flat_binder_object *fp;
+                       struct binder_node *node;
 
+                       fp = to_flat_binder_object(hdr);
+                       node = binder_get_node(proc, fp->binder);
                        if (node == NULL) {
                                pr_err("transaction release %d bad node %016llx\n",
                                       debug_id, (u64)fp->binder);
@@ -1286,14 +1475,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        node %d u%016llx\n",
                                     node->debug_id, (u64)node->ptr);
-                       binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+                       binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+                                       0);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
+                       struct flat_binder_object *fp;
                        struct binder_ref *ref;
 
+                       fp = to_flat_binder_object(hdr);
                        ref = binder_get_ref(proc, fp->handle,
-                                            fp->type == BINDER_TYPE_HANDLE);
+                                            hdr->type == BINDER_TYPE_HANDLE);
 
                        if (ref == NULL) {
                                pr_err("transaction release %d bad handle %d\n",
@@ -1303,31 +1495,348 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        ref %d desc %d (node %d)\n",
                                     ref->debug_id, ref->desc, ref->node->debug_id);
-                       binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+                       binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
                } break;
 
-               case BINDER_TYPE_FD:
+               case BINDER_TYPE_FD: {
+                       struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %d\n", fp->handle);
+                                    "        fd %d\n", fp->fd);
                        if (failed_at)
-                               task_close_fd(proc, fp->handle);
+                               task_close_fd(proc, fp->fd);
+               } break;
+               case BINDER_TYPE_PTR:
+                       /*
+                        * Nothing to do here, this will get cleaned up when the
+                        * transaction buffer gets freed
+                        */
                        break;
-
+               case BINDER_TYPE_FDA: {
+                       struct binder_fd_array_object *fda;
+                       struct binder_buffer_object *parent;
+                       uintptr_t parent_buffer;
+                       u32 *fd_array;
+                       size_t fd_index;
+                       binder_size_t fd_buf_size;
+
+                       fda = to_binder_fd_array_object(hdr);
+                       parent = binder_validate_ptr(buffer, fda->parent,
+                                                    off_start,
+                                                    offp - off_start);
+                       if (!parent) {
+                               pr_err("transaction release %d bad parent offset\n",
+                                      debug_id);
+                               continue;
+                       }
+                       /*
+                        * Since the parent was already fixed up, convert it
+                        * back to kernel address space to access it
+                        */
+                       parent_buffer = parent->buffer -
+                               proc->user_buffer_offset;
+
+                       fd_buf_size = sizeof(u32) * fda->num_fds;
+                       if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+                               pr_err("transaction release %d invalid number of fds (%lld)\n",
+                                      debug_id, (u64)fda->num_fds);
+                               continue;
+                       }
+                       if (fd_buf_size > parent->length ||
+                           fda->parent_offset > parent->length - fd_buf_size) {
+                               /* No space for all file descriptors here. */
+                               pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+                                      debug_id, (u64)fda->num_fds);
+                               continue;
+                       }
+                       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+                       for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+                               task_close_fd(proc, fd_array[fd_index]);
+               } break;
                default:
                        pr_err("transaction release %d bad object type %x\n",
-                               debug_id, fp->type);
+                               debug_id, hdr->type);
                        break;
                }
        }
 }
 
+static int binder_translate_binder(struct flat_binder_object *fp,
+                                  struct binder_transaction *t,
+                                  struct binder_thread *thread)
+{
+       struct binder_node *node;
+       struct binder_ref *ref;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       node = binder_get_node(proc, fp->binder);
+       if (!node) {
+               node = binder_new_node(proc, fp->binder, fp->cookie);
+               if (!node)
+                       return -ENOMEM;
+
+               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+       }
+       if (fp->cookie != node->cookie) {
+               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+                                 proc->pid, thread->pid, (u64)fp->binder,
+                                 node->debug_id, (u64)fp->cookie,
+                                 (u64)node->cookie);
+               return -EINVAL;
+       }
+       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+               return -EPERM;
+
+       ref = binder_get_ref_for_node(target_proc, node);
+       if (!ref)
+               return -EINVAL;
+
+       if (fp->hdr.type == BINDER_TYPE_BINDER)
+               fp->hdr.type = BINDER_TYPE_HANDLE;
+       else
+               fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+       fp->binder = 0;
+       fp->handle = ref->desc;
+       fp->cookie = 0;
+       binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
+
+       trace_binder_transaction_node_to_ref(t, node, ref);
+       binder_debug(BINDER_DEBUG_TRANSACTION,
+                    "        node %d u%016llx -> ref %d desc %d\n",
+                    node->debug_id, (u64)node->ptr,
+                    ref->debug_id, ref->desc);
+
+       return 0;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+                                  struct binder_transaction *t,
+                                  struct binder_thread *thread)
+{
+       struct binder_ref *ref;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       ref = binder_get_ref(proc, fp->handle,
+                            fp->hdr.type == BINDER_TYPE_HANDLE);
+       if (!ref) {
+               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+                                 proc->pid, thread->pid, fp->handle);
+               return -EINVAL;
+       }
+       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+               return -EPERM;
+
+       if (ref->node->proc == target_proc) {
+               if (fp->hdr.type == BINDER_TYPE_HANDLE)
+                       fp->hdr.type = BINDER_TYPE_BINDER;
+               else
+                       fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+               fp->binder = ref->node->ptr;
+               fp->cookie = ref->node->cookie;
+               binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
+                               0, NULL);
+               trace_binder_transaction_ref_to_node(t, ref);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "        ref %d desc %d -> node %d u%016llx\n",
+                            ref->debug_id, ref->desc, ref->node->debug_id,
+                            (u64)ref->node->ptr);
+       } else {
+               struct binder_ref *new_ref;
+
+               new_ref = binder_get_ref_for_node(target_proc, ref->node);
+               if (!new_ref)
+                       return -EINVAL;
+
+               fp->binder = 0;
+               fp->handle = new_ref->desc;
+               fp->cookie = 0;
+               binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
+                              NULL);
+               trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+                            ref->debug_id, ref->desc, new_ref->debug_id,
+                            new_ref->desc, ref->node->debug_id);
+       }
+       return 0;
+}
+
+static int binder_translate_fd(int fd,
+                              struct binder_transaction *t,
+                              struct binder_thread *thread,
+                              struct binder_transaction *in_reply_to)
+{
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+       int target_fd;
+       struct file *file;
+       int ret;
+       bool target_allows_fd;
+
+       if (in_reply_to)
+               target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+       else
+               target_allows_fd = t->buffer->target_node->accept_fds;
+       if (!target_allows_fd) {
+               binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+                                 proc->pid, thread->pid,
+                                 in_reply_to ? "reply" : "transaction",
+                                 fd);
+               ret = -EPERM;
+               goto err_fd_not_accepted;
+       }
+
+       file = fget(fd);
+       if (!file) {
+               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+                                 proc->pid, thread->pid, fd);
+               ret = -EBADF;
+               goto err_fget;
+       }
+       ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+       if (ret < 0) {
+               ret = -EPERM;
+               goto err_security;
+       }
+
+       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+       if (target_fd < 0) {
+               ret = -ENOMEM;
+               goto err_get_unused_fd;
+       }
+       task_fd_install(target_proc, target_fd, file);
+       trace_binder_transaction_fd(t, fd, target_fd);
+       binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
+                    fd, target_fd);
+
+       return target_fd;
+
+err_get_unused_fd:
+err_security:
+       fput(file);
+err_fget:
+err_fd_not_accepted:
+       return ret;
+}
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+                                    struct binder_buffer_object *parent,
+                                    struct binder_transaction *t,
+                                    struct binder_thread *thread,
+                                    struct binder_transaction *in_reply_to)
+{
+       binder_size_t fdi, fd_buf_size, num_installed_fds;
+       int target_fd;
+       uintptr_t parent_buffer;
+       u32 *fd_array;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       fd_buf_size = sizeof(u32) * fda->num_fds;
+       if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+               binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+                                 proc->pid, thread->pid, (u64)fda->num_fds);
+               return -EINVAL;
+       }
+       if (fd_buf_size > parent->length ||
+           fda->parent_offset > parent->length - fd_buf_size) {
+               /* No space for all file descriptors here. */
+               binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+                                 proc->pid, thread->pid, (u64)fda->num_fds);
+               return -EINVAL;
+       }
+       /*
+        * Since the parent was already fixed up, convert it
+        * back to the kernel address space to access it
+        */
+       parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+       fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+       if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+               binder_user_error("%d:%d parent offset not aligned correctly.\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+       for (fdi = 0; fdi < fda->num_fds; fdi++) {
+               target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+                                               in_reply_to);
+               if (target_fd < 0)
+                       goto err_translate_fd_failed;
+               fd_array[fdi] = target_fd;
+       }
+       return 0;
+
+err_translate_fd_failed:
+       /*
+        * Failed to allocate fd or security error, free fds
+        * installed so far.
+        */
+       num_installed_fds = fdi;
+       for (fdi = 0; fdi < num_installed_fds; fdi++)
+               task_close_fd(target_proc, fd_array[fdi]);
+       return target_fd;
+}
+
+static int binder_fixup_parent(struct binder_transaction *t,
+                              struct binder_thread *thread,
+                              struct binder_buffer_object *bp,
+                              binder_size_t *off_start,
+                              binder_size_t num_valid,
+                              struct binder_buffer_object *last_fixup_obj,
+                              binder_size_t last_fixup_min_off)
+{
+       struct binder_buffer_object *parent;
+       u8 *parent_buffer;
+       struct binder_buffer *b = t->buffer;
+       struct binder_proc *proc = thread->proc;
+       struct binder_proc *target_proc = t->to_proc;
+
+       if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+               return 0;
+
+       parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+       if (!parent) {
+               binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+
+       if (!binder_validate_fixup(b, off_start,
+                                  parent, bp->parent_offset,
+                                  last_fixup_obj,
+                                  last_fixup_min_off)) {
+               binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+
+       if (parent->length < sizeof(binder_uintptr_t) ||
+           bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+               /* No space for a pointer here! */
+               binder_user_error("%d:%d got transaction with invalid parent offset\n",
+                                 proc->pid, thread->pid);
+               return -EINVAL;
+       }
+       parent_buffer = (u8 *)(parent->buffer -
+                              target_proc->user_buffer_offset);
+       *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+       return 0;
+}
+
 static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
-                              struct binder_transaction_data *tr, int reply)
+                              struct binder_transaction_data *tr, int reply,
+                              binder_size_t extra_buffers_size)
 {
+       int ret;
        struct binder_transaction *t;
        struct binder_work *tcomplete;
-       binder_size_t *offp, *off_end;
+       binder_size_t *offp, *off_end, *off_start;
+       binder_size_t off_min;
+       u8 *sg_bufp, *sg_buf_end;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
@@ -1336,6 +1845,9 @@ static void binder_transaction(struct binder_proc *proc,
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error;
+       struct binder_buffer_object *last_fixup_obj = NULL;
+       binder_size_t last_fixup_min_off = 0;
+       struct binder_context *context = proc->context;
 
        e = binder_transaction_log_add(&binder_transaction_log);
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1344,6 +1856,7 @@ static void binder_transaction(struct binder_proc *proc,
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
+       e->context_name = proc->context->name;
 
        if (reply) {
                in_reply_to = thread->transaction_stack;
@@ -1396,7 +1909,7 @@ static void binder_transaction(struct binder_proc *proc,
                        }
                        target_node = ref->node;
                } else {
-                       target_node = binder_context_mgr_node;
+                       target_node = context->binder_context_mgr_node;
                        if (target_node == NULL) {
                                return_error = BR_DEAD_REPLY;
                                goto err_no_context_mgr_node;
@@ -1463,20 +1976,22 @@ static void binder_transaction(struct binder_proc *proc,
 
        if (reply)
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_thread->pid,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
+                            (u64)tr->data_size, (u64)tr->offsets_size,
+                            (u64)extra_buffers_size);
        else
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_node->debug_id,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
+                            (u64)tr->data_size, (u64)tr->offsets_size,
+                            (u64)extra_buffers_size);
 
        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
@@ -1492,7 +2007,8 @@ static void binder_transaction(struct binder_proc *proc,
        trace_binder_transaction(reply, t, target_node);
 
        t->buffer = binder_alloc_buf(target_proc, tr->data_size,
-               tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+               tr->offsets_size, extra_buffers_size,
+               !reply && (t->flags & TF_ONE_WAY));
        if (t->buffer == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_alloc_buf_failed;
@@ -1505,8 +2021,9 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_node)
                binder_inc_node(target_node, 1, 0, NULL);
 
-       offp = (binder_size_t *)(t->buffer->data +
-                                ALIGN(tr->data_size, sizeof(void *)));
+       off_start = (binder_size_t *)(t->buffer->data +
+                                     ALIGN(tr->data_size, sizeof(void *)));
+       offp = off_start;
 
        if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
                           tr->data.ptr.buffer, tr->data_size)) {
@@ -1528,171 +2045,138 @@ static void binder_transaction(struct binder_proc *proc,
                return_error = BR_FAILED_REPLY;
                goto err_bad_offset;
        }
-       off_end = (void *)offp + tr->offsets_size;
+       if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+               binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+                                 proc->pid, thread->pid,
+                                 (u64)extra_buffers_size);
+               return_error = BR_FAILED_REPLY;
+               goto err_bad_offset;
+       }
+       off_end = (void *)off_start + tr->offsets_size;
+       sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+       sg_buf_end = sg_bufp + extra_buffers_size;
+       off_min = 0;
        for (; offp < off_end; offp++) {
-               struct flat_binder_object *fp;
-
-               if (*offp > t->buffer->data_size - sizeof(*fp) ||
-                   t->buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(u32))) {
-                       binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
-                                         proc->pid, thread->pid, (u64)*offp);
+               struct binder_object_header *hdr;
+               size_t object_size = binder_validate_object(t->buffer, *offp);
+
+               if (object_size == 0 || *offp < off_min) {
+                       binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
+                                         proc->pid, thread->pid, (u64)*offp,
+                                         (u64)off_min,
+                                         (u64)t->buffer->data_size);
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_offset;
                }
-               fp = (struct flat_binder_object *)(t->buffer->data + *offp);
-               switch (fp->type) {
+
+               hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+               off_min = *offp + object_size;
+               switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
-                       struct binder_ref *ref;
-                       struct binder_node *node = binder_get_node(proc, fp->binder);
+                       struct flat_binder_object *fp;
 
-                       if (node == NULL) {
-                               node = binder_new_node(proc, fp->binder, fp->cookie);
-                               if (node == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_new_node_failed;
-                               }
-                               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
-                               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-                       }
-                       if (fp->cookie != node->cookie) {
-                               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
-                                       proc->pid, thread->pid,
-                                       (u64)fp->binder, node->debug_id,
-                                       (u64)fp->cookie, (u64)node->cookie);
+                       fp = to_flat_binder_object(hdr);
+                       ret = binder_translate_binder(fp, t, thread);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
+                               goto err_translate_failed;
                        }
-                       if (security_binder_transfer_binder(proc->tsk,
-                                                           target_proc->tsk)) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       ref = binder_get_ref_for_node(target_proc, node);
-                       if (ref == NULL) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       if (fp->type == BINDER_TYPE_BINDER)
-                               fp->type = BINDER_TYPE_HANDLE;
-                       else
-                               fp->type = BINDER_TYPE_WEAK_HANDLE;
-                       fp->binder = 0;
-                       fp->handle = ref->desc;
-                       fp->cookie = 0;
-                       binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
-                                      &thread->todo);
-
-                       trace_binder_transaction_node_to_ref(t, node, ref);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        node %d u%016llx -> ref %d desc %d\n",
-                                    node->debug_id, (u64)node->ptr,
-                                    ref->debug_id, ref->desc);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref;
+                       struct flat_binder_object *fp;
 
-                       ref = binder_get_ref(proc, fp->handle,
-                                            fp->type == BINDER_TYPE_HANDLE);
+                       fp = to_flat_binder_object(hdr);
+                       ret = binder_translate_handle(fp, t, thread);
+                       if (ret < 0) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_translate_failed;
+                       }
+               } break;
 
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
-                                               proc->pid,
-                                               thread->pid, fp->handle);
+               case BINDER_TYPE_FD: {
+                       struct binder_fd_object *fp = to_binder_fd_object(hdr);
+                       int target_fd = binder_translate_fd(fp->fd, t, thread,
+                                                           in_reply_to);
+
+                       if (target_fd < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
+                               goto err_translate_failed;
                        }
-                       if (security_binder_transfer_binder(proc->tsk,
-                                                           target_proc->tsk)) {
+                       fp->pad_binder = 0;
+                       fp->fd = target_fd;
+               } break;
+               case BINDER_TYPE_FDA: {
+                       struct binder_fd_array_object *fda =
+                               to_binder_fd_array_object(hdr);
+                       struct binder_buffer_object *parent =
+                               binder_validate_ptr(t->buffer, fda->parent,
+                                                   off_start,
+                                                   offp - off_start);
+                       if (!parent) {
+                               binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
+                               goto err_bad_parent;
                        }
-                       if (ref->node->proc == target_proc) {
-                               if (fp->type == BINDER_TYPE_HANDLE)
-                                       fp->type = BINDER_TYPE_BINDER;
-                               else
-                                       fp->type = BINDER_TYPE_WEAK_BINDER;
-                               fp->binder = ref->node->ptr;
-                               fp->cookie = ref->node->cookie;
-                               binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
-                               trace_binder_transaction_ref_to_node(t, ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> node %d u%016llx\n",
-                                            ref->debug_id, ref->desc, ref->node->debug_id,
-                                            (u64)ref->node->ptr);
-                       } else {
-                               struct binder_ref *new_ref;
-
-                               new_ref = binder_get_ref_for_node(target_proc, ref->node);
-                               if (new_ref == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_get_ref_for_node_failed;
-                               }
-                               fp->binder = 0;
-                               fp->handle = new_ref->desc;
-                               fp->cookie = 0;
-                               binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
-                               trace_binder_transaction_ref_to_ref(t, ref,
-                                                                   new_ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
-                                            ref->debug_id, ref->desc, new_ref->debug_id,
-                                            new_ref->desc, ref->node->debug_id);
+                       if (!binder_validate_fixup(t->buffer, off_start,
+                                                  parent, fda->parent_offset,
+                                                  last_fixup_obj,
+                                                  last_fixup_min_off)) {
+                               binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+                                                 proc->pid, thread->pid);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_bad_parent;
                        }
-               } break;
-
-               case BINDER_TYPE_FD: {
-                       int target_fd;
-                       struct file *file;
-
-                       if (reply) {
-                               if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
-                                       binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
-                                               proc->pid, thread->pid, fp->handle);
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_fd_not_allowed;
-                               }
-                       } else if (!target_node->accept_fds) {
-                               binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
-                                       proc->pid, thread->pid, fp->handle);
+                       ret = binder_translate_fd_array(fda, parent, t, thread,
+                                                       in_reply_to);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_fd_not_allowed;
+                               goto err_translate_failed;
                        }
-
-                       file = fget(fp->handle);
-                       if (file == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
-                                       proc->pid, thread->pid, fp->handle);
+                       last_fixup_obj = parent;
+                       last_fixup_min_off =
+                               fda->parent_offset + sizeof(u32) * fda->num_fds;
+               } break;
+               case BINDER_TYPE_PTR: {
+                       struct binder_buffer_object *bp =
+                               to_binder_buffer_object(hdr);
+                       size_t buf_left = sg_buf_end - sg_bufp;
+
+                       if (bp->length > buf_left) {
+                               binder_user_error("%d:%d got transaction with too large buffer\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_fget_failed;
+                               goto err_bad_offset;
                        }
-                       if (security_binder_transfer_file(proc->tsk,
-                                                         target_proc->tsk,
-                                                         file) < 0) {
-                               fput(file);
+                       if (copy_from_user(sg_bufp,
+                                          (const void __user *)(uintptr_t)
+                                          bp->buffer, bp->length)) {
+                               binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+                                                 proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
+                               goto err_copy_data_failed;
                        }
-                       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-                       if (target_fd < 0) {
-                               fput(file);
+                       /* Fixup buffer pointer to target proc address space */
+                       bp->buffer = (uintptr_t)sg_bufp +
+                               target_proc->user_buffer_offset;
+                       sg_bufp += ALIGN(bp->length, sizeof(u64));
+
+                       ret = binder_fixup_parent(t, thread, bp, off_start,
+                                                 offp - off_start,
+                                                 last_fixup_obj,
+                                                 last_fixup_min_off);
+                       if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
+                               goto err_translate_failed;
                        }
-                       task_fd_install(target_proc, target_fd, file);
-                       trace_binder_transaction_fd(t, fp->handle, target_fd);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %d -> %d\n", fp->handle, target_fd);
-                       /* TODO: fput? */
-                       fp->binder = 0;
-                       fp->handle = target_fd;
+                       last_fixup_obj = bp;
+                       last_fixup_min_off = 0;
                } break;
-
                default:
                        binder_user_error("%d:%d got transaction with invalid object type, %x\n",
-                               proc->pid, thread->pid, fp->type);
+                               proc->pid, thread->pid, hdr->type);
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_object_type;
                }
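
For orientation, here is a minimal user-space sketch of the pointer arithmetic the BINDER_TYPE_PTR case performs above: the payload is copied into the transaction's scatter-gather area, the embedded buffer pointer is rebased by a fixed offset (standing in for user_buffer_offset), and the write cursor advances in 8-byte aligned steps. The offset and sizes below are made up for illustration; this is not driver code.

/* Illustrative sketch only: mirrors the sg buffer fixup with made-up values. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN8(x) (((uintptr_t)(x) + 7u) & ~(uintptr_t)7u)

int main(void)
{
	char sender_payload[20] = "hello, binder";
	char sg_area[64];                      /* stands in for the target's sg buffer */
	char *sg_bufp = sg_area;               /* write cursor, like sg_bufp above */
	uintptr_t user_buffer_offset = 0x1000; /* hypothetical kernel-to-target offset */
	uintptr_t fixed_up;

	/* copy_from_user() analogue: pull the sender's buffer into the sg area */
	memcpy(sg_bufp, sender_payload, sizeof(sender_payload));

	/* fix up the embedded pointer so the target process can dereference it */
	fixed_up = (uintptr_t)sg_bufp + user_buffer_offset;

	/* advance the cursor to the next 8-byte boundary, as ALIGN(len, sizeof(u64)) does */
	sg_bufp += ALIGN8(sizeof(sender_payload));

	printf("fixed-up pointer %#lx, next object at sg offset %td\n",
	       (unsigned long)fixed_up, sg_bufp - sg_area);
	return 0;
}
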
@@ -1722,14 +2206,10 @@ static void binder_transaction(struct binder_proc *proc,
                wake_up_interruptible(target_wait);
        return;
 
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
+err_bad_parent:
 err_copy_data_failed:
        trace_binder_transaction_failed_buffer_release(t->buffer);
        binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1773,6 +2253,7 @@ static int binder_thread_write(struct binder_proc *proc,
                        binder_size_t *consumed)
 {
        uint32_t cmd;
+       struct binder_context *context = proc->context;
        void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
@@ -1799,10 +2280,10 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (get_user(target, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (target == 0 && binder_context_mgr_node &&
+                       if (target == 0 && context->binder_context_mgr_node &&
                            (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
                                ref = binder_get_ref_for_node(proc,
-                                              binder_context_mgr_node);
+                                       context->binder_context_mgr_node);
                                if (ref->desc != target) {
                                        binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
                                                proc->pid, thread->pid,
@@ -1947,6 +2428,17 @@ static int binder_thread_write(struct binder_proc *proc,
                        break;
                }
 
+               case BC_TRANSACTION_SG:
+               case BC_REPLY_SG: {
+                       struct binder_transaction_data_sg tr;
+
+                       if (copy_from_user(&tr, ptr, sizeof(tr)))
+                               return -EFAULT;
+                       ptr += sizeof(tr);
+                       binder_transaction(proc, thread, &tr.transaction_data,
+                                          cmd == BC_REPLY_SG, tr.buffers_size);
+                       break;
+               }
                case BC_TRANSACTION:
                case BC_REPLY: {
                        struct binder_transaction_data tr;
@@ -1954,7 +2446,8 @@ static int binder_thread_write(struct binder_proc *proc,
                        if (copy_from_user(&tr, ptr, sizeof(tr)))
                                return -EFAULT;
                        ptr += sizeof(tr);
-                       binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+                       binder_transaction(proc, thread, &tr,
+                                          cmd == BC_REPLY, 0);
                        break;
                }
 
@@ -2708,9 +3201,11 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
 {
        int ret = 0;
        struct binder_proc *proc = filp->private_data;
+       struct binder_context *context = proc->context;
+
        kuid_t curr_euid = current_euid();
 
-       if (binder_context_mgr_node != NULL) {
+       if (context->binder_context_mgr_node) {
                pr_err("BINDER_SET_CONTEXT_MGR already set\n");
                ret = -EBUSY;
                goto out;
@@ -2718,27 +3213,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
        ret = security_binder_set_context_mgr(proc->tsk);
        if (ret < 0)
                goto out;
-       if (uid_valid(binder_context_mgr_uid)) {
-               if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+       if (uid_valid(context->binder_context_mgr_uid)) {
+               if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
                        pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
                               from_kuid(&init_user_ns, curr_euid),
                               from_kuid(&init_user_ns,
-                                       binder_context_mgr_uid));
+                                        context->binder_context_mgr_uid));
                        ret = -EPERM;
                        goto out;
                }
        } else {
-               binder_context_mgr_uid = curr_euid;
+               context->binder_context_mgr_uid = curr_euid;
        }
-       binder_context_mgr_node = binder_new_node(proc, 0, 0);
-       if (binder_context_mgr_node == NULL) {
+       context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
+       if (!context->binder_context_mgr_node) {
                ret = -ENOMEM;
                goto out;
        }
-       binder_context_mgr_node->local_weak_refs++;
-       binder_context_mgr_node->local_strong_refs++;
-       binder_context_mgr_node->has_strong_ref = 1;
-       binder_context_mgr_node->has_weak_ref = 1;
+       context->binder_context_mgr_node->local_weak_refs++;
+       context->binder_context_mgr_node->local_strong_refs++;
+       context->binder_context_mgr_node->has_strong_ref = 1;
+       context->binder_context_mgr_node->has_weak_ref = 1;
 out:
        return ret;
 }
@@ -2959,6 +3454,7 @@ err_bad_arg:
 static int binder_open(struct inode *nodp, struct file *filp)
 {
        struct binder_proc *proc;
+       struct binder_device *binder_dev;
 
        binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                     current->group_leader->pid, current->pid);
@@ -2971,6 +3467,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
        INIT_LIST_HEAD(&proc->todo);
        init_waitqueue_head(&proc->wait);
        proc->default_priority = task_nice(current);
+       binder_dev = container_of(filp->private_data, struct binder_device,
+                                 miscdev);
+       proc->context = &binder_dev->context;
 
        binder_lock(__func__);
 
@@ -2986,8 +3485,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
                char strbuf[11];
 
                snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+               /*
+                * proc debug entries are shared between contexts, so
+                * this will fail if the process tries to open the driver
+                * again with a different context. The printing code will
+                * print all contexts that a given PID has anyway, so this
+                * is not a problem.
+                */
                proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
-                       binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+                       binder_debugfs_dir_entry_proc,
+                       (void *)(unsigned long)proc->pid,
+                       &binder_proc_fops);
        }
 
        return 0;
@@ -3080,6 +3588,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 static void binder_deferred_release(struct binder_proc *proc)
 {
        struct binder_transaction *t;
+       struct binder_context *context = proc->context;
        struct rb_node *n;
        int threads, nodes, incoming_refs, outgoing_refs, buffers,
                active_transactions, page_count;
@@ -3089,11 +3598,12 @@ static void binder_deferred_release(struct binder_proc *proc)
 
        hlist_del(&proc->proc_node);
 
-       if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+       if (context->binder_context_mgr_node &&
+           context->binder_context_mgr_node->proc == proc) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%s: %d context_mgr_node gone\n",
                             __func__, proc->pid);
-               binder_context_mgr_node = NULL;
+               context->binder_context_mgr_node = NULL;
        }
 
        threads = 0;
@@ -3380,6 +3890,7 @@ static void print_binder_proc(struct seq_file *m,
        size_t header_pos;
 
        seq_printf(m, "proc %d\n", proc->pid);
+       seq_printf(m, "context %s\n", proc->context->name);
        header_pos = m->count;
 
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3449,7 +3960,9 @@ static const char * const binder_command_strings[] = {
        "BC_EXIT_LOOPER",
        "BC_REQUEST_DEATH_NOTIFICATION",
        "BC_CLEAR_DEATH_NOTIFICATION",
-       "BC_DEAD_BINDER_DONE"
+       "BC_DEAD_BINDER_DONE",
+       "BC_TRANSACTION_SG",
+       "BC_REPLY_SG",
 };
 
 static const char * const binder_objstat_strings[] = {
@@ -3504,6 +4017,7 @@ static void print_binder_proc_stats(struct seq_file *m,
        int count, strong, weak;
 
        seq_printf(m, "proc %d\n", proc->pid);
+       seq_printf(m, "context %s\n", proc->context->name);
        count = 0;
        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
                count++;
@@ -3610,13 +4124,19 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 
 static int binder_proc_show(struct seq_file *m, void *unused)
 {
-       struct binder_proc *proc = m->private;
+       struct binder_proc *itr;
+       int pid = (unsigned long)m->private;
        int do_lock = !binder_debug_no_lock;
 
        if (do_lock)
                binder_lock(__func__);
-       seq_puts(m, "binder proc state:\n");
-       print_binder_proc(m, proc, 1);
+
+       hlist_for_each_entry(itr, &binder_procs, proc_node) {
+               if (itr->pid == pid) {
+                       seq_puts(m, "binder proc state:\n");
+                       print_binder_proc(m, itr, 1);
+               }
+       }
        if (do_lock)
                binder_unlock(__func__);
        return 0;
@@ -3626,11 +4146,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
                                        struct binder_transaction_log_entry *e)
 {
        seq_printf(m,
-                  "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+                  "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
                   e->debug_id, (e->call_type == 2) ? "reply" :
                   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
-                  e->from_thread, e->to_proc, e->to_thread, e->to_node,
-                  e->target_handle, e->data_size, e->offsets_size);
+                  e->from_thread, e->to_proc, e->to_thread, e->context_name,
+                  e->to_node, e->target_handle, e->data_size, e->offsets_size);
 }
 
 static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3658,20 +4178,44 @@ static const struct file_operations binder_fops = {
        .release = binder_release,
 };
 
-static struct miscdevice binder_miscdev = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = "binder",
-       .fops = &binder_fops
-};
-
 BINDER_DEBUG_ENTRY(state);
 BINDER_DEBUG_ENTRY(stats);
 BINDER_DEBUG_ENTRY(transactions);
 BINDER_DEBUG_ENTRY(transaction_log);
 
+static int __init init_binder_device(const char *name)
+{
+       int ret;
+       struct binder_device *binder_device;
+
+       binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+       if (!binder_device)
+               return -ENOMEM;
+
+       binder_device->miscdev.fops = &binder_fops;
+       binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+       binder_device->miscdev.name = name;
+
+       binder_device->context.binder_context_mgr_uid = INVALID_UID;
+       binder_device->context.name = name;
+
+       ret = misc_register(&binder_device->miscdev);
+       if (ret < 0) {
+               kfree(binder_device);
+               return ret;
+       }
+
+       hlist_add_head(&binder_device->hlist, &binder_devices);
+
+       return ret;
+}
+
 static int __init binder_init(void)
 {
        int ret;
+       char *device_name, *device_names;
+       struct binder_device *device;
+       struct hlist_node *tmp;
 
        binder_deferred_workqueue = create_singlethread_workqueue("binder");
        if (!binder_deferred_workqueue)
@@ -3681,7 +4225,7 @@ static int __init binder_init(void)
        if (binder_debugfs_dir_entry_root)
                binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
                                                 binder_debugfs_dir_entry_root);
-       ret = misc_register(&binder_miscdev);
+
        if (binder_debugfs_dir_entry_root) {
                debugfs_create_file("state",
                                    S_IRUGO,
@@ -3709,6 +4253,37 @@ static int __init binder_init(void)
                                    &binder_transaction_log_failed,
                                    &binder_transaction_log_fops);
        }
+
+       /*
+        * Copy the module_parameter string, because we don't want to
+        * tokenize it in-place.
+        */
+       device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+       if (!device_names) {
+               ret = -ENOMEM;
+               goto err_alloc_device_names_failed;
+       }
+       strcpy(device_names, binder_devices_param);
+
+       while ((device_name = strsep(&device_names, ","))) {
+               ret = init_binder_device(device_name);
+               if (ret)
+                       goto err_init_binder_device_failed;
+       }
+
+       return ret;
+
+err_init_binder_device_failed:
+       hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+               misc_deregister(&device->miscdev);
+               hlist_del(&device->hlist);
+               kfree(device);
+       }
+err_alloc_device_names_failed:
+       debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
+       destroy_workqueue(binder_deferred_workqueue);
+
        return ret;
 }
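
binder_init() above copies the binder_devices_param string before walking it, because strsep() tokenizes in place. Below is a stand-alone sketch of the same copy-then-strsep pattern; the parameter value used here is only an example, not a value taken from this patch.

/* Illustrative sketch: copy-then-strsep, as binder_init() does with binder_devices_param. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *param = "binder,hwbinder,vndbinder"; /* example parameter value only */
	char *names = strdup(param); /* copy, so the original is not tokenized in place */
	char *cursor = names;
	char *name;

	while ((name = strsep(&cursor, ",")) != NULL)
		printf("would register misc device '%s'\n", name);

	free(names);
	return 0;
}
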
 
index a641cf3ccad691e76d9665e9defab4ef04e9af4f..9e425fbf83cbe0b342652b9976a334c93f5c2468 100644 (file)
@@ -205,6 +205,8 @@ static void driver_bound(struct device *dev)
 
        klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
 
+       device_pm_check_callbacks(dev);
+
        /*
         * Make sure the device is no longer in one of the deferred lists and
         * kick off retrying all pending devices
@@ -697,6 +699,7 @@ static void __device_release_driver(struct device *dev)
                        dev->pm_domain->dismiss(dev);
 
                klist_remove(&dev->p->knode_driver);
+               device_pm_check_callbacks(dev);
                if (dev->bus)
                        blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                                     BUS_NOTIFY_UNBOUND_DRIVER,
index e9b713675c7c49fb165cb5de2b20b2843041e694..6c5bc3fadfcfc5dda58bb4e48935be753b4233ab 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -125,6 +126,7 @@ void device_pm_add(struct device *dev)
 {
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+       device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
@@ -147,6 +149,7 @@ void device_pm_remove(struct device *dev)
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
+       device_pm_check_callbacks(dev);
 }
 
 /**
@@ -1348,6 +1351,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
        TRACE_DEVICE(dev);
@@ -1368,6 +1372,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               pm_get_active_wakeup_sources(suspend_abort,
+                       MAX_SUSPEND_ABORT_LEN);
+               log_suspend_abort_reason(suspend_abort);
                async_error = -EBUSY;
                goto Complete;
        }
@@ -1570,6 +1577,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
        dev->power.wakeup_path = device_may_wakeup(dev);
 
+       if (dev->power.no_pm_callbacks) {
+               ret = 1;        /* Let device go direct_complete */
+               goto unlock;
+       }
+
        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
@@ -1592,6 +1604,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
        if (callback)
                ret = callback(dev);
 
+unlock:
        device_unlock(dev);
 
        if (ret < 0) {
@@ -1720,3 +1733,30 @@ void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
        device_pm_unlock();
 }
 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
+
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+       if (!ops)
+               return true;
+
+       return !ops->prepare &&
+              !ops->suspend &&
+              !ops->suspend_late &&
+              !ops->suspend_noirq &&
+              !ops->resume_noirq &&
+              !ops->resume_early &&
+              !ops->resume &&
+              !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+       spin_lock_irq(&dev->power.lock);
+       dev->power.no_pm_callbacks =
+               (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
+               (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+               (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+               (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+               (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+       spin_unlock_irq(&dev->power.lock);
+}
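
device_pm_check_callbacks() only records whether every PM callback slot reachable from the device is NULL, which later lets device_prepare() push the device straight onto the direct_complete path. A hedged user-space illustration of that all-pointers-empty test, using a made-up ops structure rather than struct dev_pm_ops:

/* Illustrative sketch of the pm_ops_is_empty() idea, using a made-up ops struct. */
#include <stdbool.h>
#include <stdio.h>

struct demo_pm_ops {
	int (*prepare)(void);
	int (*suspend)(void);
	int (*resume)(void);
	void (*complete)(void);
};

static int demo_suspend(void) { return 0; }

static bool ops_is_empty(const struct demo_pm_ops *ops)
{
	if (!ops)
		return true;
	return !ops->prepare && !ops->suspend && !ops->resume && !ops->complete;
}

int main(void)
{
	struct demo_pm_ops empty = { 0 };
	struct demo_pm_ops with_suspend = { .suspend = demo_suspend };

	/* a device whose bus/class/type/driver ops are all empty can skip callbacks */
	printf("empty ops        -> no_pm_callbacks = %d\n", ops_is_empty(&empty));
	printf("ops with suspend -> no_pm_callbacks = %d\n", ops_is_empty(&with_suspend));
	return 0;
}
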
index 998fa6b230844391b023b5ba8d4f0a22f9c645b0..297beae6431459458c509dcc4c497056156ddb64 100644 (file)
@@ -123,6 +123,7 @@ extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
 extern void device_pm_move_after(struct device *, struct device *);
 extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
 
 #else /* !CONFIG_PM_SLEEP */
 
@@ -141,6 +142,8 @@ static inline void device_pm_move_after(struct device *deva,
                                        struct device *devb) {}
 static inline void device_pm_move_last(struct device *dev) {}
 
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
 #endif /* !CONFIG_PM_SLEEP */
 
 static inline void device_pm_init(struct device *dev)
index a1e0b9ab847a345c6a09adab3ff9f2fd9af3ad3e..09c07f519952d7afa342fb8e1156c6f15497f9c8 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/types.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -804,6 +805,37 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+       struct wakeup_source *ws, *last_active_ws = NULL;
+       int len = 0;
+       bool active = false;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+               if (ws->active && len < max) {
+                       if (!active)
+                               len += scnprintf(pending_wakeup_source, max,
+                                               "Pending Wakeup Sources: ");
+                       len += scnprintf(pending_wakeup_source + len, max - len,
+                               "%s ", ws->name);
+                       active = true;
+               } else if (!active &&
+                          (!last_active_ws ||
+                           ktime_to_ns(ws->last_time) >
+                           ktime_to_ns(last_active_ws->last_time))) {
+                       last_active_ws = ws;
+               }
+       }
+       if (!active && last_active_ws) {
+               scnprintf(pending_wakeup_source, max,
+                               "Last active Wakeup Source: %s",
+                               last_active_ws->name);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 void pm_print_active_wakeup_sources(void)
 {
        struct wakeup_source *ws;
index 8d98a329f6ea63a2daf179bb3f15e5307c6a0d13..96c34a95cc625929a08ca0e76c13b9b830a4907c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -75,6 +76,8 @@ int syscore_suspend(void)
        return 0;
 
  err_out:
+       log_suspend_abort_reason("System core suspend callback %pF failed",
+               ops->suspend);
        pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
        list_for_each_entry_continue(ops, &syscore_ops_list, node)
index 491a4dce13fef5962a9b07d6cef5c82fc3fc31d8..d93dfebae0bba58a7634d8dfc8e76aa97c0e515c 100644 (file)
@@ -1824,6 +1824,28 @@ unsigned int get_random_int(void)
 }
 EXPORT_SYMBOL(get_random_int);
 
+/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+       __u32 *hash;
+       unsigned long ret;
+
+       if (arch_get_random_long(&ret))
+               return ret;
+
+       hash = get_cpu_var(get_random_int_hash);
+
+       hash[0] += current->pid + jiffies + random_get_entropy();
+       md5_transform(hash, random_int_secret);
+       ret = *(unsigned long *)hash;
+       put_cpu_var(get_random_int_hash);
+
+       return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
 /*
  * randomize_range() returns a start address such that
  *
index 659879a56dbac59c7f18e0d2f51393bf9a3e645f..d43c401ff19046ad40fb482498bb2f89e55f2df0 100644 (file)
@@ -102,6 +102,24 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+       bool "interactive"
+       select CPU_FREQ_GOV_INTERACTIVE
+       help
+         Use the CPUFreq governor 'interactive' as default. This allows
+         you to get a full dynamic cpu frequency capable system by simply
+         loading your cpufreq low-level hardware driver, using the
+         'interactive' governor for latency-sensitive workloads.
+
+config CPU_FREQ_DEFAULT_GOV_SCHED
+       bool "sched"
+       select CPU_FREQ_GOV_SCHED
+       help
+         Use the CPUfreq governor 'sched' as default. This scales
+         cpu frequency using CPU utilization estimates from the
+         scheduler.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -159,6 +177,20 @@ config CPU_FREQ_GOV_ONDEMAND
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+       bool "'interactive' cpufreq policy governor"
+       help
+         'interactive' - This driver adds a dynamic cpufreq policy governor
+         designed for latency-sensitive workloads.
+
+         This governor attempts to reduce the latency of clock
+         increases so that the system is more responsive to
+         interactive workloads.
+
+         For details, take a look at linux/Documentation/cpu-freq.
+
+         If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
        tristate "'conservative' cpufreq governor"
        depends on CPU_FREQ
@@ -183,6 +215,19 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_SCHED
+       bool "'sched' cpufreq governor"
+       depends on CPU_FREQ
+       depends on SMP
+       select CPU_FREQ_GOV_COMMON
+       help
+         'sched' - this governor scales cpu frequency from the
+         scheduler as a function of cpu capacity utilization. It does
+         not evaluate utilization on a periodic basis (as ondemand
+         does) but instead is event-driven by the scheduler.
+
+         If in doubt, say N.
+
 comment "CPU frequency scaling drivers"
 
 config CPUFREQ_DT
index c0af1a1281c89134269445f9330d4d449c37135e..b02ae1463a151ac90cdfcdcde1ffca1f7c4f6917 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)  += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
 obj-$(CONFIG_CPUFREQ_DT)               += cpufreq-dt.o
index 8412ce5f93a712a03bfa81df25bee238a299d242..7b728143440dab3b24032b95095c6fb5494d5434 100644 (file)
@@ -29,6 +29,9 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#ifdef CONFIG_SMP
+#include <linux/sched.h>
+#endif
 #include <trace/events/power.h>
 
 static LIST_HEAD(cpufreq_policy_list);
@@ -154,6 +157,12 @@ bool have_governor_per_policy(void)
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
+bool cpufreq_driver_is_slow(void)
+{
+       return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
+
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 {
        if (have_governor_per_policy())
@@ -347,6 +356,50 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 #endif
 }
 
+/*********************************************************************
+ *               FREQUENCY INVARIANT CPU CAPACITY                    *
+ *********************************************************************/
+
+static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
+
+static void
+scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
+{
+       unsigned long cur = freqs ? freqs->new : policy->cur;
+       unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
+       struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
+       int cpu;
+
+       pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
+                cpumask_pr_args(policy->cpus), cur, policy->max, scale);
+
+       for_each_cpu(cpu, policy->cpus)
+               per_cpu(freq_scale, cpu) = scale;
+
+       if (freqs)
+               return;
+
+       scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
+
+       pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
+                cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
+                scale);
+
+       for_each_cpu(cpu, policy->cpus)
+               per_cpu(max_freq_scale, cpu) = scale;
+}
+
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+       return per_cpu(freq_scale, cpu);
+}
+
+unsigned long cpufreq_scale_max_freq_capacity(int cpu)
+{
+       return per_cpu(max_freq_scale, cpu);
+}
+
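
The per-CPU scale above reduces to scale = (cur << SCHED_CAPACITY_SHIFT) / max, so a CPU running at its maximum frequency reports SCHED_CAPACITY_SCALE (1024) and lower frequencies report proportionally less. A small stand-alone sketch of that arithmetic with made-up frequencies:

/* Illustrative arithmetic for scale_freq_capacity(); the frequencies are made up. */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	unsigned long policy_max = 1800000; /* kHz, hypothetical */
	unsigned long cur_freqs[] = { 600000, 1200000, 1800000 };
	unsigned int i;

	for (i = 0; i < sizeof(cur_freqs) / sizeof(cur_freqs[0]); i++) {
		unsigned long scale =
			(cur_freqs[i] << SCHED_CAPACITY_SHIFT) / policy_max;

		printf("cur %lu kHz -> freq_scale %lu of %lu\n",
		       cur_freqs[i], scale, SCHED_CAPACITY_SCALE);
	}
	return 0;
}
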
 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
 {
@@ -423,6 +476,9 @@ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
 {
+#ifdef CONFIG_SMP
+       int cpu;
+#endif
 
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
@@ -450,6 +506,12 @@ wait:
 
        spin_unlock(&policy->transition_lock);
 
+       scale_freq_capacity(policy, freqs);
+#ifdef CONFIG_SMP
+       for_each_cpu(cpu, policy->cpus)
+               trace_cpu_capacity(capacity_curr_of(cpu), cpu);
+#endif
+
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
@@ -2126,8 +2188,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_NOTIFY, new_policy);
 
+       scale_freq_capacity(new_policy, NULL);
+
        policy->min = new_policy->min;
        policy->max = new_policy->max;
+       trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
 
        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644 (file)
index 0000000..f2929e6
--- /dev/null
@@ -0,0 +1,1367 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+       struct timer_list cpu_timer;
+       struct timer_list cpu_slack_timer;
+       spinlock_t load_lock; /* protects the next 4 fields */
+       u64 time_in_idle;
+       u64 time_in_idle_timestamp;
+       u64 cputime_speedadj;
+       u64 cputime_speedadj_timestamp;
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *freq_table;
+       spinlock_t target_freq_lock; /* protects target freq */
+       unsigned int target_freq;
+       unsigned int floor_freq;
+       u64 pol_floor_val_time; /* policy floor_validate_time */
+       u64 loc_floor_val_time; /* per-cpu floor_validate_time */
+       u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
+       u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
+       struct rw_semaphore enable_sem;
+       int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+       DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+       int usage_count;
+       /* Hi speed to bump to from lo speed on a load burst (default max) */
+       unsigned int hispeed_freq;
+       /* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+       unsigned long go_hispeed_load;
+       /* Target load. Lower values result in higher CPU speeds. */
+       spinlock_t target_loads_lock;
+       unsigned int *target_loads;
+       int ntarget_loads;
+       /*
+        * The minimum amount of time to spend at a frequency before we can ramp
+        * down.
+        */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+       unsigned long min_sample_time;
+       /*
+        * The sample rate of the timer used to increase frequency
+        */
+       unsigned long timer_rate;
+       /*
+        * Wait this long before raising speed above hispeed, by default a
+        * single timer interval.
+        */
+       spinlock_t above_hispeed_delay_lock;
+       unsigned int *above_hispeed_delay;
+       int nabove_hispeed_delay;
+       /* Non-zero means indefinite speed boost active */
+       int boost_val;
+       /* Duration of a boost pulse in usecs */
+       int boostpulse_duration_val;
+       /* End time of boost pulse in ktime converted to usecs */
+       u64 boostpulse_endtime;
+       bool boosted;
+       /*
+        * Max additional time to wait in idle, beyond timer_rate, at speeds
+        * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+        */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+       int timer_slack_val;
+       bool io_is_busy;
+};
+
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static void cpufreq_interactive_timer_resched(
+       struct cpufreq_interactive_cpuinfo *pcpu)
+{
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned long expires;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(smp_processor_id(),
+                                 &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+       mod_timer_pinned(&pcpu->cpu_timer, expires);
+
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+       }
+
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/* The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The cpu_timer and cpu_slack_timer must be deactivated when calling this
+ * function.
+ */
+static void cpufreq_interactive_timer_start(
+       struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       unsigned long expires = jiffies +
+               usecs_to_jiffies(tunables->timer_rate);
+       unsigned long flags;
+
+       pcpu->cpu_timer.expires = expires;
+       add_timer_on(&pcpu->cpu_timer, cpu);
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               pcpu->cpu_slack_timer.expires = expires;
+               add_timer_on(&pcpu->cpu_slack_timer, cpu);
+       }
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+                       freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+               ;
+
+       ret = tunables->above_hispeed_delay[i];
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static unsigned int freq_to_targetload(
+       struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads - 1 &&
+                   freq >= tunables->target_loads[i+1]; i += 2)
+               ;
+
+       ret = tunables->target_loads[i];
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+               unsigned int loadadjfreq)
+{
+       unsigned int freq = pcpu->policy->cur;
+       unsigned int prevfreq, freqmin, freqmax;
+       unsigned int tl;
+       int index;
+
+       freqmin = 0;
+       freqmax = UINT_MAX;
+
+       do {
+               prevfreq = freq;
+               tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+               /*
+                * Find the lowest frequency where the computed load is less
+                * than or equal to the target load.
+                */
+
+               if (cpufreq_frequency_table_target(
+                           pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+                           CPUFREQ_RELATION_L, &index))
+                       break;
+               freq = pcpu->freq_table[index].frequency;
+
+               if (freq > prevfreq) {
+                       /* The previous frequency is too low. */
+                       freqmin = prevfreq;
+
+                       if (freq >= freqmax) {
+                               /*
+                                * Find the highest frequency that is less
+                                * than freqmax.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmax - 1, CPUFREQ_RELATION_H,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               if (freq == freqmin) {
+                                       /*
+                                        * The first frequency below freqmax
+                                        * has already been found to be too
+                                        * low.  freqmax is the lowest speed
+                                        * we found that is fast enough.
+                                        */
+                                       freq = freqmax;
+                                       break;
+                               }
+                       }
+               } else if (freq < prevfreq) {
+                       /* The previous frequency is high enough. */
+                       freqmax = prevfreq;
+
+                       if (freq <= freqmin) {
+                               /*
+                                * Find the lowest frequency that is higher
+                                * than freqmin.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmin + 1, CPUFREQ_RELATION_L,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               /*
+                                * If freqmax is the first frequency above
+                                * freqmin then we have already found that
+                                * this speed is fast enough.
+                                */
+                               if (freq == freqmax)
+                                       break;
+                       }
+               }
+
+               /* If same frequency chosen as previous then done. */
+       } while (freq != prevfreq);
+
+       return freq;
+}
+
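
The governor's timer computes loadadjfreq as roughly load% * current_freq, and choose_freq() then hunts for the lowest table frequency f whose target load is not exceeded, i.e. loadadjfreq / target_load(f) <= f. With a flat target load the search converges in a single CPUFREQ_RELATION_L style lookup, which the sketch below shows with a made-up frequency table and loads:

/* Illustrative sketch of the target-load selection; table and loads are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int table[] = { 300000, 600000, 900000, 1200000, 1500000 }; /* kHz */
	unsigned int ntable = sizeof(table) / sizeof(table[0]);
	unsigned int target_load = 90;  /* flat DEFAULT_TARGET_LOAD */
	unsigned int cur = 600000;      /* current frequency */
	unsigned int cpu_load = 95;     /* measured busy percentage at cur */
	unsigned int loadadjfreq = cpu_load * cur; /* load * freq, as in the timer */
	unsigned int wanted = loadadjfreq / target_load;
	unsigned int new_freq = table[ntable - 1];
	unsigned int i;

	/* CPUFREQ_RELATION_L analogue: lowest table entry >= wanted */
	for (i = 0; i < ntable; i++) {
		if (table[i] >= wanted) {
			new_freq = table[i];
			break;
		}
	}

	printf("load %u%% at %u kHz -> need >= %u kHz -> pick %u kHz\n",
	       cpu_load, cur, wanted, new_freq);
	return 0;
}
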
+static u64 update_load(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       u64 now;
+       u64 now_idle;
+       unsigned int delta_idle;
+       unsigned int delta_time;
+       u64 active_time;
+
+       now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+       delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+       delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+
+       if (delta_time <= delta_idle)
+               active_time = 0;
+       else
+               active_time = delta_time - delta_idle;
+
+       pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+       pcpu->time_in_idle = now_idle;
+       pcpu->time_in_idle_timestamp = now;
+       return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+       u64 now;
+       unsigned int delta_time;
+       u64 cputime_speedadj;
+       int cpu_load;
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, data);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned int new_freq;
+       unsigned int loadadjfreq;
+       unsigned int index;
+       unsigned long flags;
+       u64 max_fvtime;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled)
+               goto exit;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       now = update_load(data);
+       delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+       cputime_speedadj = pcpu->cputime_speedadj;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+       if (WARN_ON_ONCE(!delta_time))
+               goto rearm;
+
+       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+       do_div(cputime_speedadj, delta_time);
+       loadadjfreq = (unsigned int)cputime_speedadj * 100;
+       cpu_load = loadadjfreq / pcpu->policy->cur;
+       tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+       if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
+               if (pcpu->policy->cur < tunables->hispeed_freq) {
+                       new_freq = tunables->hispeed_freq;
+               } else {
+                       new_freq = choose_freq(pcpu, loadadjfreq);
+
+                       if (new_freq < tunables->hispeed_freq)
+                               new_freq = tunables->hispeed_freq;
+               }
+       } else {
+               new_freq = choose_freq(pcpu, loadadjfreq);
+               if (new_freq > tunables->hispeed_freq &&
+                               pcpu->policy->cur < tunables->hispeed_freq)
+                       new_freq = tunables->hispeed_freq;
+       }
+
+       if (pcpu->policy->cur >= tunables->hispeed_freq &&
+           new_freq > pcpu->policy->cur &&
+           now - pcpu->pol_hispeed_val_time <
+           freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
+               trace_cpufreq_interactive_notyet(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm;
+       }
+
+       pcpu->loc_hispeed_val_time = now;
+
+       if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+                                          new_freq, CPUFREQ_RELATION_L,
+                                          &index)) {
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm;
+       }
+
+       new_freq = pcpu->freq_table[index].frequency;
+
+       /*
+        * Do not scale below floor_freq unless we have been at or above the
+        * floor frequency for the minimum sample time since last validated.
+        */
+       max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
+       if (new_freq < pcpu->floor_freq &&
+           pcpu->target_freq >= pcpu->policy->cur) {
+               if (now - max_fvtime < tunables->min_sample_time) {
+                       trace_cpufreq_interactive_notyet(
+                               data, cpu_load, pcpu->target_freq,
+                               pcpu->policy->cur, new_freq);
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       goto rearm;
+               }
+       }
+
+       /*
+        * Update the timestamp for checking whether speed has been held at
+        * or above the selected frequency for a minimum of min_sample_time,
+        * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+        * allow the speed to drop as soon as the boostpulse duration expires
+        * (or the indefinite boost is turned off).
+        */
+
+       if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
+               pcpu->floor_freq = new_freq;
+               if (pcpu->target_freq >= pcpu->policy->cur ||
+                   new_freq >= pcpu->policy->cur)
+                       pcpu->loc_floor_val_time = now;
+       }
+
+       if (pcpu->target_freq == new_freq &&
+                       pcpu->target_freq <= pcpu->policy->cur) {
+               trace_cpufreq_interactive_already(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm;
+       }
+
+       trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+                                        pcpu->policy->cur, new_freq);
+
+       pcpu->target_freq = new_freq;
+       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       cpumask_set_cpu(data, &speedchange_cpumask);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       wake_up_process(speedchange_task);
+
+rearm:
+       if (!timer_pending(&pcpu->cpu_timer))
+               cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+       up_read(&pcpu->enable_sem);
+       return;
+}
+
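
The floor handling in the timer above only lets the target drop below floor_freq once min_sample_time has elapsed since the floor was last validated (the later of the policy and per-CPU timestamps). A small sketch of that timing check, with made-up timestamps:

/* Illustrative sketch of the min_sample_time floor check; timestamps are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t min_sample_time = 80000;     /* usecs, DEFAULT_MIN_SAMPLE_TIME */
	uint64_t pol_floor_val_time = 1000000;
	uint64_t loc_floor_val_time = 1040000;
	uint64_t now = 1100000;
	uint64_t max_fvtime = pol_floor_val_time > loc_floor_val_time ?
			      pol_floor_val_time : loc_floor_val_time;
	bool may_ramp_down = (now - max_fvtime) >= min_sample_time;

	printf("%llu us since the floor was validated -> %s\n",
	       (unsigned long long)(now - max_fvtime),
	       may_ramp_down ? "allow dropping below floor_freq" : "hold floor_freq");
	return 0;
}
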
+static void cpufreq_interactive_idle_end(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       /* Arm the timer for 1-2 ticks later if not already. */
+       if (!timer_pending(&pcpu->cpu_timer)) {
+               cpufreq_interactive_timer_resched(pcpu);
+       } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+               del_timer(&pcpu->cpu_timer);
+               del_timer(&pcpu->cpu_slack_timer);
+               cpufreq_interactive_timer(smp_processor_id());
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
+                                               unsigned int *pmax_freq,
+                                               u64 *phvt, u64 *pfvt)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       unsigned int max_freq = 0;
+       u64 hvt = ~0ULL, fvt = 0;
+       unsigned int i;
+
+       for_each_cpu(i, policy->cpus) {
+               pcpu = &per_cpu(cpuinfo, i);
+
+               fvt = max(fvt, pcpu->loc_floor_val_time);
+               if (pcpu->target_freq > max_freq) {
+                       max_freq = pcpu->target_freq;
+                       hvt = pcpu->loc_hispeed_val_time;
+               } else if (pcpu->target_freq == max_freq) {
+                       hvt = min(hvt, pcpu->loc_hispeed_val_time);
+               }
+       }
+
+       *pmax_freq = max_freq;
+       *phvt = hvt;
+       *pfvt = fvt;
+}
+
+static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
+                                          struct cpufreq_policy *policy)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       u64 hvt, fvt;
+       unsigned int max_freq;
+       int i;
+
+       cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
+
+       for_each_cpu(i, policy->cpus) {
+               pcpu = &per_cpu(cpuinfo, i);
+               pcpu->pol_floor_val_time = fvt;
+       }
+
+       if (max_freq != policy->cur) {
+               __cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
+               for_each_cpu(i, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, i);
+                       pcpu->pol_hispeed_val_time = hvt;
+               }
+       }
+
+       trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+               if (cpumask_empty(&speedchange_cpumask)) {
+                       spin_unlock_irqrestore(&speedchange_cpumask_lock,
+                                              flags);
+                       schedule();
+
+                       if (kthread_should_stop())
+                               break;
+
+                       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+               }
+
+               set_current_state(TASK_RUNNING);
+               tmp_mask = speedchange_cpumask;
+               cpumask_clear(&speedchange_cpumask);
+               spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+               for_each_cpu(cpu, &tmp_mask) {
+                       pcpu = &per_cpu(cpuinfo, cpu);
+
+                       down_write(&pcpu->policy->rwsem);
+
+                       if (likely(down_read_trylock(&pcpu->enable_sem))) {
+                               if (likely(pcpu->governor_enabled))
+                                       cpufreq_interactive_adjust_cpu(cpu,
+                                                       pcpu->policy);
+                               up_read(&pcpu->enable_sem);
+                       }
+
+                       up_write(&pcpu->policy->rwsem);
+               }
+       }
+
+       return 0;
+}
+
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
+{
+       int i;
+       int anyboost = 0;
+       unsigned long flags[2];
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       tunables->boosted = true;
+
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       continue;
+
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       continue;
+               }
+
+               if (tunables != pcpu->policy->governor_data) {
+                       up_read(&pcpu->enable_sem);
+                       continue;
+               }
+
+               spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+               if (pcpu->target_freq < tunables->hispeed_freq) {
+                       pcpu->target_freq = tunables->hispeed_freq;
+                       cpumask_set_cpu(i, &speedchange_cpumask);
+                       pcpu->pol_hispeed_val_time =
+                               ktime_to_us(ktime_get());
+                       anyboost = 1;
+               }
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+
+               up_read(&pcpu->enable_sem);
+       }
+
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+       if (anyboost)
+               wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+       struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       int cpu;
+       unsigned long flags;
+
+       if (val == CPUFREQ_POSTCHANGE) {
+               pcpu = &per_cpu(cpuinfo, freq->cpu);
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       return 0;
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       return 0;
+               }
+
+               for_each_cpu(cpu, pcpu->policy->cpus) {
+                       struct cpufreq_interactive_cpuinfo *pjcpu =
+                               &per_cpu(cpuinfo, cpu);
+                       if (cpu != freq->cpu) {
+                               if (!down_read_trylock(&pjcpu->enable_sem))
+                                       continue;
+                               if (!pjcpu->governor_enabled) {
+                                       up_read(&pjcpu->enable_sem);
+                                       continue;
+                               }
+                       }
+                       spin_lock_irqsave(&pjcpu->load_lock, flags);
+                       update_load(cpu);
+                       spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+                       if (cpu != freq->cpu)
+                               up_read(&pjcpu->enable_sem);
+               }
+
+               up_read(&pcpu->enable_sem);
+       }
+       return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+       .notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+       const char *cp;
+       int i;
+       int ntokens = 1;
+       unsigned int *tokenized_data;
+       int err = -EINVAL;
+
+       cp = buf;
+       while ((cp = strpbrk(cp + 1, " :")))
+               ntokens++;
+
+       if (!(ntokens & 0x1))
+               goto err;
+
+       tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+       if (!tokenized_data) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       cp = buf;
+       i = 0;
+       while (i < ntokens) {
+               if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+                       goto err_kfree;
+
+               cp = strpbrk(cp, " :");
+               if (!cp)
+                       break;
+               cp++;
+       }
+
+       if (i != ntokens)
+               goto err_kfree;
+
+       *num_tokens = ntokens;
+       return tokenized_data;
+
+err_kfree:
+       kfree(tokenized_data);
+err:
+       return ERR_PTR(err);
+}
+
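Editor's note (not part of the patch): get_tokenized_data() parses the
space/colon-separated value lists used by the target_loads and
above_hispeed_delay tunables and rejects an even token count. A minimal
sketch of the expected behaviour; the caller name is invented:

        /* Hypothetical caller, for illustration only. */
        static void tokenizer_example(void)
        {
                int ntokens;
                unsigned int *tokens = get_tokenized_data("85 1500000:90", &ntokens);

                if (!IS_ERR(tokens)) {
                        /* ntokens == 3; tokens[] == { 85, 1500000, 90 } */
                        kfree(tokens);
                }
                /* "85 1500000" (two tokens, even count) would return ERR_PTR(-EINVAL) */
        }
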
+static ssize_t show_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads; i++)
+               ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
+static ssize_t store_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_target_loads = NULL;
+       unsigned long flags;
+
+       new_target_loads = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_target_loads))
+               return PTR_RET(new_target_loads);
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+       if (tunables->target_loads != default_target_loads)
+               kfree(tunables->target_loads);
+       tunables->target_loads = new_target_loads;
+       tunables->ntarget_loads = ntokens;
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+               ret += sprintf(buf + ret, "%u%s",
+                              tunables->above_hispeed_delay[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_above_hispeed_delay = NULL;
+       unsigned long flags;
+
+       new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_above_hispeed_delay))
+               return PTR_RET(new_above_hispeed_delay);
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+       if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+               kfree(tunables->above_hispeed_delay);
+       tunables->above_hispeed_delay = new_above_hispeed_delay;
+       tunables->nabove_hispeed_delay = ntokens;
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return count;
+
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->hispeed_freq = val;
+       return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->go_hispeed_load = val;
+       return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->min_sample_time = val;
+       return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val, val_round;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       val_round = jiffies_to_usecs(usecs_to_jiffies(val));
+       if (val != val_round)
+               pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
+                       val_round);
+
+       tunables->timer_rate = val_round;
+       return count;
+}
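
Editor's note (not part of the patch): the usecs_to_jiffies()/jiffies_to_usecs()
round trip above clamps timer_rate to jiffy granularity. A worked example,
assuming CONFIG_HZ=100 (10 ms per jiffy):

        unsigned long val = 25000;                     /* requested 25 ms */
        unsigned long j = usecs_to_jiffies(val);       /* rounds up to 3 jiffies */
        unsigned long val_round = jiffies_to_usecs(j); /* 30000 us is stored */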
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       long val;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->timer_slack_val = val;
+       return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+                         char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+                          const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boost_val = val;
+
+       if (tunables->boost_val) {
+               trace_cpufreq_interactive_boost("on");
+               if (!tunables->boosted)
+                       cpufreq_interactive_boost(tunables);
+       } else {
+               tunables->boostpulse_endtime = ktime_to_us(ktime_get());
+               trace_cpufreq_interactive_unboost("off");
+       }
+
+       return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+                               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+               tunables->boostpulse_duration_val;
+       trace_cpufreq_interactive_boost("pulse");
+       if (!tunables->boosted)
+               cpufreq_interactive_boost(tunables);
+       return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_duration_val = val;
+       return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->io_is_busy = val;
+       return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)                                    \
+static ssize_t show_##file_name##_gov_sys                              \
+(struct kobject *kobj, struct attribute *attr, char *buf)              \
+{                                                                      \
+       return show_##file_name(common_tunables, buf);                  \
+}                                                                      \
+                                                                       \
+static ssize_t show_##file_name##_gov_pol                              \
+(struct cpufreq_policy *policy, char *buf)                             \
+{                                                                      \
+       return show_##file_name(policy->governor_data, buf);            \
+}
+
+#define store_gov_pol_sys(file_name)                                   \
+static ssize_t store_##file_name##_gov_sys                             \
+(struct kobject *kobj, struct attribute *attr, const char *buf,                \
+       size_t count)                                                   \
+{                                                                      \
+       return store_##file_name(common_tunables, buf, count);          \
+}                                                                      \
+                                                                       \
+static ssize_t store_##file_name##_gov_pol                             \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
+{                                                                      \
+       return store_##file_name(policy->governor_data, buf, count);    \
+}
+
+#define show_store_gov_pol_sys(file_name)                              \
+show_gov_pol_sys(file_name);                                           \
+store_gov_pol_sys(file_name)
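
Editor's note (not part of the patch): as a reference for the wrapper scheme
above, show_gov_pol_sys(hispeed_freq) expands to the following pair of
functions; the _gov_sys variant serves the single system-wide governor
instance through common_tunables, while the _gov_pol variant serves the
per-policy instance through policy->governor_data:

        static ssize_t show_hispeed_freq_gov_sys
        (struct kobject *kobj, struct attribute *attr, char *buf)
        {
                return show_hispeed_freq(common_tunables, buf);
        }

        static ssize_t show_hispeed_freq_gov_pol
        (struct cpufreq_policy *policy, char *buf)
        {
                return show_hispeed_freq(policy->governor_data, buf);
        }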
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+
+#define gov_sys_attr_rw(_name)                                         \
+static struct global_attr _name##_gov_sys =                            \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)                                         \
+static struct freq_attr _name##_gov_pol =                              \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)                                     \
+       gov_sys_attr_rw(_name);                                         \
+       gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+       &target_loads_gov_sys.attr,
+       &above_hispeed_delay_gov_sys.attr,
+       &hispeed_freq_gov_sys.attr,
+       &go_hispeed_load_gov_sys.attr,
+       &min_sample_time_gov_sys.attr,
+       &timer_rate_gov_sys.attr,
+       &timer_slack_gov_sys.attr,
+       &boost_gov_sys.attr,
+       &boostpulse_gov_sys.attr,
+       &boostpulse_duration_gov_sys.attr,
+       &io_is_busy_gov_sys.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+       .attrs = interactive_attributes_gov_sys,
+       .name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+       &target_loads_gov_pol.attr,
+       &above_hispeed_delay_gov_pol.attr,
+       &hispeed_freq_gov_pol.attr,
+       &go_hispeed_load_gov_pol.attr,
+       &min_sample_time_gov_pol.attr,
+       &timer_rate_gov_pol.attr,
+       &timer_slack_gov_pol.attr,
+       &boost_gov_pol.attr,
+       &boostpulse_gov_pol.attr,
+       &boostpulse_duration_gov_pol.attr,
+       &io_is_busy_gov_pol.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+       .attrs = interactive_attributes_gov_pol,
+       .name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+       if (have_governor_per_policy())
+               return &interactive_attr_group_gov_pol;
+       else
+               return &interactive_attr_group_gov_sys;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+                                            unsigned long val,
+                                            void *data)
+{
+       if (val == IDLE_END)
+               cpufreq_interactive_idle_end();
+
+       return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+       .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       int rc;
+       unsigned int j;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_frequency_table *freq_table;
+       struct cpufreq_interactive_tunables *tunables;
+       unsigned long flags;
+
+       if (have_governor_per_policy())
+               tunables = policy->governor_data;
+       else
+               tunables = common_tunables;
+
+       WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               if (have_governor_per_policy()) {
+                       WARN_ON(tunables);
+               } else if (tunables) {
+                       tunables->usage_count++;
+                       policy->governor_data = tunables;
+                       return 0;
+               }
+
+               tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+               if (!tunables) {
+                       pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+                       return -ENOMEM;
+               }
+
+               tunables->usage_count = 1;
+               tunables->above_hispeed_delay = default_above_hispeed_delay;
+               tunables->nabove_hispeed_delay =
+                       ARRAY_SIZE(default_above_hispeed_delay);
+               tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+               tunables->target_loads = default_target_loads;
+               tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+               tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_rate = DEFAULT_TIMER_RATE;
+               tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+               spin_lock_init(&tunables->target_loads_lock);
+               spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+               policy->governor_data = tunables;
+               if (!have_governor_per_policy()) {
+                       common_tunables = tunables;
+               }
+
+               rc = sysfs_create_group(get_governor_parent_kobj(policy),
+                               get_sysfs_attr());
+               if (rc) {
+                       kfree(tunables);
+                       policy->governor_data = NULL;
+                       if (!have_governor_per_policy()) {
+                               common_tunables = NULL;
+                       }
+                       return rc;
+               }
+
+               if (!policy->governor->initialized) {
+                       idle_notifier_register(&cpufreq_interactive_idle_nb);
+                       cpufreq_register_notifier(&cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+               }
+
+               break;
+
+       case CPUFREQ_GOV_POLICY_EXIT:
+               if (!--tunables->usage_count) {
+                       if (policy->governor->initialized == 1) {
+                               cpufreq_unregister_notifier(&cpufreq_notifier_block,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+                               idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+                       }
+
+                       sysfs_remove_group(get_governor_parent_kobj(policy),
+                                       get_sysfs_attr());
+
+                       kfree(tunables);
+                       common_tunables = NULL;
+               }
+
+               policy->governor_data = NULL;
+               break;
+
+       case CPUFREQ_GOV_START:
+               mutex_lock(&gov_lock);
+
+               freq_table = cpufreq_frequency_get_table(policy->cpu);
+               if (!tunables->hispeed_freq)
+                       tunables->hispeed_freq = policy->max;
+
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       pcpu->policy = policy;
+                       pcpu->target_freq = policy->cur;
+                       pcpu->freq_table = freq_table;
+                       pcpu->floor_freq = pcpu->target_freq;
+                       pcpu->pol_floor_val_time =
+                               ktime_to_us(ktime_get());
+                       pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
+                       pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
+                       pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
+                       down_write(&pcpu->enable_sem);
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(tunables, j);
+                       pcpu->governor_enabled = 1;
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_STOP:
+               mutex_lock(&gov_lock);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       down_write(&pcpu->enable_sem);
+                       pcpu->governor_enabled = 0;
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_LIMITS:
+               if (policy->max < policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->max, CPUFREQ_RELATION_H);
+               else if (policy->min > policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->min, CPUFREQ_RELATION_L);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+
+                       down_read(&pcpu->enable_sem);
+                       if (pcpu->governor_enabled == 0) {
+                               up_read(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+                       if (policy->max < pcpu->target_freq)
+                               pcpu->target_freq = policy->max;
+                       else if (policy->min > pcpu->target_freq)
+                               pcpu->target_freq = policy->min;
+
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       up_read(&pcpu->enable_sem);
+               }
+               break;
+       }
+       return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+       .name = "interactive",
+       .governor = cpufreq_governor_interactive,
+       .max_transition_latency = 10000000,
+       .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+       unsigned int i;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+       /* Initialize per-cpu timers */
+       for_each_possible_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               init_timer_deferrable(&pcpu->cpu_timer);
+               pcpu->cpu_timer.function = cpufreq_interactive_timer;
+               pcpu->cpu_timer.data = i;
+               init_timer(&pcpu->cpu_slack_timer);
+               pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+               spin_lock_init(&pcpu->load_lock);
+               spin_lock_init(&pcpu->target_freq_lock);
+               init_rwsem(&pcpu->enable_sem);
+       }
+
+       spin_lock_init(&speedchange_cpumask_lock);
+       mutex_init(&gov_lock);
+       speedchange_task =
+               kthread_create(cpufreq_interactive_speedchange_task, NULL,
+                              "cfinteractive");
+       if (IS_ERR(speedchange_task))
+               return PTR_ERR(speedchange_task);
+
+       sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+       get_task_struct(speedchange_task);
+
+       /* NB: wake up so the thread does not look hung to the freezer */
+       wake_up_process(speedchange_task);
+
+       return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+       cpufreq_unregister_governor(&cpufreq_gov_interactive);
+       kthread_stop(speedchange_task);
+       put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+       "latency sensitive workloads");
+MODULE_LICENSE("GPL");
index d40b2c077746395c5cbc98e6d79b3267a0a42d7c..151971627757b0ce968636cb3100375ac68852e5 100644 (file)
@@ -192,7 +192,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        }
 
        /* Take note of the planned idle state. */
-       sched_idle_set_state(target_state);
+       sched_idle_set_state(target_state, index);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ktime_get();
@@ -205,7 +205,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
-       sched_idle_set_state(NULL);
+       sched_idle_set_state(NULL, -1);
 
        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
index 22e4463d1787ab3d37aaaa61cfc402c16a315a2d..0846b39b4ca6dd467a35914f2292ff4af6e485f4 100644 (file)
@@ -178,7 +178,12 @@ static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned lo
 
        /* for higher loadavg, we are more reluctant */
 
-       mult += 2 * get_loadavg(load);
+       /*
+        * this doesn't work as intended - it is almost always 0, but can
+        * sometimes, depending on workload, spike very high into the hundreds
+        * even when the average cpu load is under 10%.
+        */
+       /* mult += 2 * get_loadavg(); */
 
        /* for IO wait tasks (per cpu!) we add 5x each */
        mult += 10 * nr_iowaiters;
index 7b05dbe9b2964fd8c431dde0512a0ba2f4fd6ff2..0ed73ad1b87628314b792479da3320d7a44d3e90 100644 (file)
@@ -304,8 +304,12 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb)
        spin_lock_irqsave(fence->lock, flags);
 
        ret = !list_empty(&cb->node);
-       if (ret)
+       if (ret) {
                list_del_init(&cb->node);
+               if (list_empty(&fence->cb_list))
+                       if (fence->ops->disable_signaling)
+                               fence->ops->disable_signaling(fence);
+       }
 
        spin_unlock_irqrestore(fence->lock, flags);
 
index ec379a4164cc07474fd5f2c193cbe6437df0e6fd..f292917b00e7142425fd6676ee48bbd3050b8946 100644 (file)
@@ -18,3 +18,6 @@ obj-$(CONFIG_EFI_RUNTIME_MAP)         += runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)     += runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)                 += libstub/
 obj-$(CONFIG_EFI_FAKE_MEMMAP)          += fake_mem.o
+
+arm-obj-$(CONFIG_EFI)                  := arm-init.o arm-runtime.o
+obj-$(CONFIG_ARM64)                    += $(arm-obj-y)
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
new file mode 100644 (file)
index 0000000..9e15d57
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <asm/efi.h>
+
+struct efi_memory_map memmap;
+
+u64 efi_system_table;
+
+static int __init is_normal_ram(efi_memory_desc_t *md)
+{
+       if (md->attribute & EFI_MEMORY_WB)
+               return 1;
+       return 0;
+}
+
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
+{
+       efi_memory_desc_t *md;
+
+       for_each_efi_memory_desc(&memmap, md) {
+               if (!(md->attribute & EFI_MEMORY_RUNTIME))
+                       continue;
+               if (md->virt_addr == 0)
+                       /* no virtual mapping has been installed by the stub */
+                       break;
+               if (md->virt_addr <= addr &&
+                   (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+                       return md->phys_addr + addr - md->virt_addr;
+       }
+       return addr;
+}
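
Editor's note (not part of the patch): a worked example of the translation
above, with invented numbers. For a runtime descriptor where
md->phys_addr == 0x80000000, md->virt_addr == 0xffffffc000000000 and
md->num_pages == 16 (64 KiB, since EFI_PAGE_SHIFT == 12),
efi_to_phys(0xffffffc000001000) falls inside that range and returns
0x80000000 + (0xffffffc000001000 - 0xffffffc000000000) == 0x80001000.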
+
+static int __init uefi_init(void)
+{
+       efi_char16_t *c16;
+       void *config_tables;
+       size_t table_size;
+       char vendor[100] = "unknown";
+       int i, retval;
+
+       efi.systab = early_memremap(efi_system_table,
+                                   sizeof(efi_system_table_t));
+       if (efi.systab == NULL) {
+               pr_warn("Unable to map EFI system table.\n");
+               return -ENOMEM;
+       }
+
+       set_bit(EFI_BOOT, &efi.flags);
+       if (IS_ENABLED(CONFIG_64BIT))
+               set_bit(EFI_64BIT, &efi.flags);
+
+       /*
+        * Verify the EFI Table
+        */
+       if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+               pr_err("System table signature incorrect\n");
+               retval = -EINVAL;
+               goto out;
+       }
+       if ((efi.systab->hdr.revision >> 16) < 2)
+               pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+                       efi.systab->hdr.revision >> 16,
+                       efi.systab->hdr.revision & 0xffff);
+
+       /* Show what we know for posterity */
+       c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+                            sizeof(vendor) * sizeof(efi_char16_t));
+       if (c16) {
+       for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
+                       vendor[i] = c16[i];
+               vendor[i] = '\0';
+               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+       }
+
+       pr_info("EFI v%u.%.02u by %s\n",
+               efi.systab->hdr.revision >> 16,
+               efi.systab->hdr.revision & 0xffff, vendor);
+
+       table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+       config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+                                      table_size);
+       if (config_tables == NULL) {
+               pr_warn("Unable to map EFI config table array.\n");
+               retval = -ENOMEM;
+               goto out;
+       }
+       retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+                                        sizeof(efi_config_table_t), NULL);
+
+       early_memunmap(config_tables, table_size);
+out:
+       early_memunmap(efi.systab,  sizeof(efi_system_table_t));
+       return retval;
+}
+
+/*
+ * Return true for RAM regions we want to permanently reserve.
+ */
+static __init int is_reserve_region(efi_memory_desc_t *md)
+{
+       switch (md->type) {
+       case EFI_LOADER_CODE:
+       case EFI_LOADER_DATA:
+       case EFI_BOOT_SERVICES_CODE:
+       case EFI_BOOT_SERVICES_DATA:
+       case EFI_CONVENTIONAL_MEMORY:
+       case EFI_PERSISTENT_MEMORY:
+               return 0;
+       default:
+               break;
+       }
+       return is_normal_ram(md);
+}
+
+static __init void reserve_regions(void)
+{
+       efi_memory_desc_t *md;
+       u64 paddr, npages, size;
+
+       if (efi_enabled(EFI_DBG))
+               pr_info("Processing EFI memory map:\n");
+
+       for_each_efi_memory_desc(&memmap, md) {
+               paddr = md->phys_addr;
+               npages = md->num_pages;
+
+               if (efi_enabled(EFI_DBG)) {
+                       char buf[64];
+
+                       pr_info("  0x%012llx-0x%012llx %s",
+                               paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+                               efi_md_typeattr_format(buf, sizeof(buf), md));
+               }
+
+               memrange_efi_to_native(&paddr, &npages);
+               size = npages << PAGE_SHIFT;
+
+               if (is_normal_ram(md))
+                       early_init_dt_add_memory_arch(paddr, size);
+
+               if (is_reserve_region(md)) {
+                       memblock_mark_nomap(paddr, size);
+                       if (efi_enabled(EFI_DBG))
+                               pr_cont("*");
+               }
+
+               if (efi_enabled(EFI_DBG))
+                       pr_cont("\n");
+       }
+
+       set_bit(EFI_MEMMAP, &efi.flags);
+}
+
+void __init efi_init(void)
+{
+       struct efi_fdt_params params;
+
+       /* Grab UEFI information placed in FDT by stub */
+       if (!efi_get_fdt_params(&params))
+               return;
+
+       efi_system_table = params.system_table;
+
+       memmap.phys_map = params.mmap;
+       memmap.map = early_memremap(params.mmap, params.mmap_size);
+       if (memmap.map == NULL) {
+               /*
+                * If we are booting via UEFI, the UEFI memory map is the only
+                * description of memory we have, so there is little point in
+                * proceeding if we cannot access it.
+                */
+               panic("Unable to map EFI memory map.\n");
+       }
+       memmap.map_end = memmap.map + params.mmap_size;
+       memmap.desc_size = params.desc_size;
+       memmap.desc_version = params.desc_ver;
+
+       if (uefi_init() < 0)
+               return;
+
+       reserve_regions();
+       early_memunmap(memmap.map, params.mmap_size);
+       memblock_mark_nomap(params.mmap & PAGE_MASK,
+                           PAGE_ALIGN(params.mmap_size +
+                                      (params.mmap & ~PAGE_MASK)));
+}
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
new file mode 100644 (file)
index 0000000..6ae21e4
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+extern u64 efi_system_table;
+
+static struct mm_struct efi_mm = {
+       .mm_rb                  = RB_ROOT,
+       .mm_users               = ATOMIC_INIT(2),
+       .mm_count               = ATOMIC_INIT(1),
+       .mmap_sem               = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+       .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+       .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
+};
+
+static bool __init efi_virtmap_init(void)
+{
+       efi_memory_desc_t *md;
+
+       efi_mm.pgd = pgd_alloc(&efi_mm);
+       init_new_context(NULL, &efi_mm);
+
+       for_each_efi_memory_desc(&memmap, md) {
+               phys_addr_t phys = md->phys_addr;
+               int ret;
+
+               if (!(md->attribute & EFI_MEMORY_RUNTIME))
+                       continue;
+               if (md->virt_addr == 0)
+                       return false;
+
+               ret = efi_create_mapping(&efi_mm, md);
+               if (!ret) {
+                       pr_info("  EFI remap %pa => %p\n",
+                               &phys, (void *)(unsigned long)md->virt_addr);
+               } else {
+                       pr_warn("  EFI remap %pa: failed to create mapping (%d)\n",
+                               &phys, ret);
+                       return false;
+               }
+       }
+       return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init arm_enable_runtime_services(void)
+{
+       u64 mapsize;
+
+       if (!efi_enabled(EFI_BOOT)) {
+               pr_info("EFI services will not be available.\n");
+               return 0;
+       }
+
+       if (efi_runtime_disabled()) {
+               pr_info("EFI runtime services will be disabled.\n");
+               return 0;
+       }
+
+       pr_info("Remapping and enabling EFI services.\n");
+
+       mapsize = memmap.map_end - memmap.map;
+       memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
+                                                  mapsize);
+       if (!memmap.map) {
+               pr_err("Failed to remap EFI memory map\n");
+               return -ENOMEM;
+       }
+       memmap.map_end = memmap.map + mapsize;
+       efi.memmap = &memmap;
+
+       efi.systab = (__force void *)ioremap_cache(efi_system_table,
+                                                  sizeof(efi_system_table_t));
+       if (!efi.systab) {
+               pr_err("Failed to remap EFI System Table\n");
+               return -ENOMEM;
+       }
+       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+       if (!efi_virtmap_init()) {
+               pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+               return -ENOMEM;
+       }
+
+       /* Set up runtime services function pointers */
+       efi_native_runtime_setup();
+       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+       efi.runtime_version = efi.systab->hdr.revision;
+
+       return 0;
+}
+early_initcall(arm_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+       preempt_disable();
+       efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+       efi_set_pgd(current->active_mm);
+       preempt_enable();
+}
index 3b52677f459ae24b2dd2dcd75b0c4fe95beeb64c..c51f3b2fe3c0868fe643ebe2b1d18554b61e1ca8 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 
+#include <asm/early_ioremap.h>
+
 struct efi __read_mostly efi = {
        .mps                    = EFI_INVALID_TABLE_ADDR,
        .acpi                   = EFI_INVALID_TABLE_ADDR,
index 92870cdb52d946a7408463a5b249129befd5e91d..8efaa88329aa3d40296fb5ff29aed1e20e2d1d37 100644 (file)
@@ -218,7 +218,8 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
                goto done_proc;
        }
 
-       remaining_bytes = do_div(buffer_size, sizeof(__s32));
+       remaining_bytes = buffer_size % sizeof(__s32);
+       buffer_size = buffer_size / sizeof(__s32);
        if (buffer_size) {
                for (i = 0; i < buffer_size; ++i) {
                        hid_set_field(report->field[field_index], i,
index a35532ec00e4131d40cda5a841a730e96e8e20e3..2557dcda7621a1dbe58ae54caef6bc85025207f3 100644 (file)
@@ -187,6 +187,19 @@ config INPUT_APMPOWER
          To compile this driver as a module, choose M here: the
          module will be called apm-power.
 
+config INPUT_KEYRESET
+       bool "Reset key"
+       depends on INPUT
+       select INPUT_KEYCOMBO
+       ---help---
+         Say Y here if you want to reboot when some keys are pressed.
+
+config INPUT_KEYCOMBO
+       bool "Key combo"
+       depends on INPUT
+       ---help---
+         Say Y here if you want to take action when some keys are pressed.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
index 0c9302ca9954f5887b1ff297648cd59dc215d629..2a6d05ab9170fc6bce9f0f97de5e135e3d29706c 100644 (file)
@@ -26,3 +26,6 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN)       += touchscreen/
 obj-$(CONFIG_INPUT_MISC)       += misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)   += apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)   += keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)   += keycombo.o
+
index 907e4e278fce1dc8f85777b90354ecd5cecd6e36..c877e56a9bd5760f0ef9151460a6fd5f7d99a6b2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/acpi.h>
+
+#define GOLDFISH_MAX_FINGERS 5
 
 enum {
        REG_READ        = 0x00,
@@ -51,7 +55,21 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
        value = __raw_readl(edev->addr + REG_READ);
 
        input_event(edev->input, type, code, value);
-       input_sync(edev->input);
+       // Send an extra (EV_SYN, SYN_REPORT, 0x0) event
+       // if a key was pressed. Some keyboard device
+       // drivers may only send the EV_KEY event and
+       // not EV_SYN.
+       // Note that sending an extra SYN_REPORT is not
+       // necessary nor correct protocol with other
+       // devices such as touchscreens, which will send
+       // their own SYN_REPORT's when sufficient event
+       // information has been collected (e.g., for
+       // touchscreens, when pressure and X/Y coordinates
+       // have been received). Hence, we will only send
+       // this extra SYN_REPORT if type == EV_KEY.
+       if (type == EV_KEY) {
+               input_sync(edev->input);
+       }
        return IRQ_HANDLED;
 }
 
@@ -153,6 +171,15 @@ static int events_probe(struct platform_device *pdev)
 
        input_dev->name = edev->name;
        input_dev->id.bustype = BUS_HOST;
+       // Set the Goldfish Device to be multi-touch.
+       // In the Ranchu kernel, there is multi-touch-specific
+       // code for handling ABS_MT_SLOT events.
+       // See drivers/input/input.c:input_handle_abs_event.
+       // If we do not issue input_mt_init_slots,
+       // the kernel will filter out needed ABS_MT_SLOT
+       // events when we touch the screen in more than one place,
+       // preventing multi-touch with more than one finger from working.
+       input_mt_init_slots(input_dev, GOLDFISH_MAX_FINGERS, 0);
 
        events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
        events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX);
@@ -178,10 +205,26 @@ static int events_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_events_of_match[] = {
+       { .compatible = "google,goldfish-events-keypad", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_events_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id goldfish_events_acpi_match[] = {
+       { "GFSH0002", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_events_acpi_match);
+#endif
+
 static struct platform_driver events_driver = {
        .probe  = events_probe,
        .driver = {
                .name   = "goldfish_events",
+               .of_match_table = goldfish_events_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_events_acpi_match),
        },
 };
 
diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c
new file mode 100644 (file)
index 0000000..2fba451
--- /dev/null
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+struct keycombo_state {
+       struct input_handler input_handler;
+       unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+       unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+       unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+       spinlock_t lock;
+       struct workqueue_struct *wq;
+       int key_down_target;
+       int key_down;
+       int key_up;
+       struct delayed_work key_down_work;
+       int delay;
+       struct work_struct key_up_work;
+       void (*key_up_fn)(void *);
+       void (*key_down_fn)(void *);
+       void *priv;
+       int key_is_down;
+       struct wakeup_source combo_held_wake_source;
+       struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+       struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                                       work);
+       struct keycombo_state *state = container_of(dwork,
+                                       struct keycombo_state, key_down_work);
+       if (state->key_down_fn)
+               state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+       struct keycombo_state *state = container_of(work, struct keycombo_state,
+                                                               key_up_work);
+       if (state->key_up_fn)
+               state->key_up_fn(state->priv);
+       __pm_relax(&state->combo_up_wake_source);
+}
+
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+               unsigned int code, int value)
+{
+       unsigned long flags;
+       struct keycombo_state *state = handle->private;
+
+       if (type != EV_KEY)
+               return;
+
+       if (code >= KEY_MAX)
+               return;
+
+       if (!test_bit(code, state->keybit))
+               return;
+
+       spin_lock_irqsave(&state->lock, flags);
+       if (!test_bit(code, state->key) == !value)
+               goto done;
+       __change_bit(code, state->key);
+       if (test_bit(code, state->upbit)) {
+               if (value)
+                       state->key_up++;
+               else
+                       state->key_up--;
+       } else {
+               if (value)
+                       state->key_down++;
+               else
+                       state->key_down--;
+       }
+       if (state->key_down == state->key_down_target && state->key_up == 0) {
+               __pm_stay_awake(&state->combo_held_wake_source);
+               state->key_is_down = 1;
+               if (!queue_delayed_work(state->wq, &state->key_down_work,
+                                                               state->delay))
+                       pr_debug("Key down work already queued!\n");
+       } else if (state->key_is_down) {
+               if (!cancel_delayed_work(&state->key_down_work)) {
+                       __pm_stay_awake(&state->combo_up_wake_source);
+                       queue_work(state->wq, &state->key_up_work);
+               }
+               __pm_relax(&state->combo_held_wake_source);
+               state->key_is_down = 0;
+       }
+done:
+       spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+               struct input_dev *dev,
+               const struct input_device_id *id)
+{
+       int i;
+       int ret;
+       struct input_handle *handle;
+       struct keycombo_state *state =
+               container_of(handler, struct keycombo_state, input_handler);
+       for (i = 0; i < KEY_MAX; i++) {
+               if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+                       break;
+       }
+       if (i == KEY_MAX)
+               return -ENODEV;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = KEYCOMBO_NAME;
+       handle->private = state;
+
+       ret = input_register_handle(handle);
+       if (ret)
+               goto err_input_register_handle;
+
+       ret = input_open_device(handle);
+       if (ret)
+               goto err_input_open_device;
+
+       return 0;
+
+err_input_open_device:
+       input_unregister_handle(handle);
+err_input_register_handle:
+       kfree(handle);
+       return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+               {
+                               .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+                               .evbit = { BIT_MASK(EV_KEY) },
+               },
+               { },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+       int ret;
+       int key, *keyp;
+       struct keycombo_state *state;
+       struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+       if (!pdata)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       spin_lock_init(&state->lock);
+       keyp = pdata->keys_down;
+       while ((key = *keyp++)) {
+               if (key >= KEY_MAX)
+                       continue;
+               state->key_down_target++;
+               __set_bit(key, state->keybit);
+       }
+       if (pdata->keys_up) {
+               keyp = pdata->keys_up;
+               while ((key = *keyp++)) {
+                       if (key >= KEY_MAX)
+                               continue;
+                       __set_bit(key, state->keybit);
+                       __set_bit(key, state->upbit);
+               }
+       }
+
+       state->wq = alloc_ordered_workqueue("keycombo", 0);
+       if (!state->wq)
+               return -ENOMEM;
+
+       state->priv = pdata->priv;
+
+       if (pdata->key_down_fn)
+               state->key_down_fn = pdata->key_down_fn;
+       INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+       if (pdata->key_up_fn)
+               state->key_up_fn = pdata->key_up_fn;
+       INIT_WORK(&state->key_up_work, do_key_up);
+
+       wakeup_source_init(&state->combo_held_wake_source, "key combo");
+       wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+       state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+       state->input_handler.event = keycombo_event;
+       state->input_handler.connect = keycombo_connect;
+       state->input_handler.disconnect = keycombo_disconnect;
+       state->input_handler.name = KEYCOMBO_NAME;
+       state->input_handler.id_table = keycombo_ids;
+       ret = input_register_handler(&state->input_handler);
+       if (ret) {
+               kfree(state);
+               return ret;
+       }
+       platform_set_drvdata(pdev, state);
+       return 0;
+}
+
+int keycombo_remove(struct platform_device *pdev)
+{
+       struct keycombo_state *state = platform_get_drvdata(pdev);
+       input_unregister_handler(&state->input_handler);
+       destroy_workqueue(state->wq);
+       kfree(state);
+       return 0;
+}
+
+struct platform_driver keycombo_driver = {
+               .driver.name = KEYCOMBO_NAME,
+               .probe = keycombo_probe,
+               .remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+       return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+       return platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644 (file)
index 0000000..7e5222a
--- /dev/null
@@ -0,0 +1,144 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/keycombo.h>
+
+struct keyreset_state {
+       int restart_requested;
+       int (*reset_fn)(void);
+       struct platform_device *pdev_child;
+       struct work_struct restart_work;
+};
+
+static void do_restart(struct work_struct *unused)
+{
+       orderly_reboot();
+}
+
+static void do_reset_fn(void *priv)
+{
+       struct keyreset_state *state = priv;
+       if (state->restart_requested)
+               panic("keyboard reset failed, %d", state->restart_requested);
+       if (state->reset_fn) {
+               state->restart_requested = state->reset_fn();
+       } else {
+               pr_info("keyboard reset\n");
+               schedule_work(&state->restart_work);
+               state->restart_requested = 1;
+       }
+}
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+       int ret = -ENOMEM;
+       struct keycombo_platform_data *pdata_child;
+       struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+       int up_size = 0, down_size = 0, size;
+       int key, *keyp;
+       struct keyreset_state *state;
+
+       if (!pdata)
+               return -EINVAL;
+       state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+                                                       PLATFORM_DEVID_AUTO);
+       if (!state->pdev_child)
+               return -ENOMEM;
+       state->pdev_child->dev.parent = &pdev->dev;
+       INIT_WORK(&state->restart_work, do_restart);
+
+       keyp = pdata->keys_down;
+       while ((key = *keyp++)) {
+               if (key >= KEY_MAX)
+                       continue;
+               down_size++;
+       }
+       if (pdata->keys_up) {
+               keyp = pdata->keys_up;
+               while ((key = *keyp++)) {
+                       if (key >= KEY_MAX)
+                               continue;
+                       up_size++;
+               }
+       }
+       size = sizeof(struct keycombo_platform_data)
+                       + sizeof(int) * (down_size + 1);
+       pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+       if (!pdata_child)
+               goto error;
+       memcpy(pdata_child->keys_down, pdata->keys_down,
+                                               sizeof(int) * down_size);
+       if (up_size > 0) {
+               pdata_child->keys_up = devm_kzalloc(&pdev->dev,
+                               sizeof(int) * (up_size + 1), GFP_KERNEL);
+               if (!pdata_child->keys_up)
+                       goto error;
+               memcpy(pdata_child->keys_up, pdata->keys_up,
+                                                       sizeof(int) * up_size);
+       }
+       state->reset_fn = pdata->reset_fn;
+       pdata_child->key_down_fn = do_reset_fn;
+       pdata_child->priv = state;
+       pdata_child->key_down_delay = pdata->key_down_delay;
+       ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+       if (ret)
+               goto error;
+       platform_set_drvdata(pdev, state);
+       return platform_device_add(state->pdev_child);
+error:
+       platform_device_put(state->pdev_child);
+       return ret;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+       struct keyreset_state *state = platform_get_drvdata(pdev);
+       platform_device_put(state->pdev_child);
+       return 0;
+}
+
+struct platform_driver keyreset_driver = {
+       .driver.name = KEYRESET_NAME,
+       .probe = keyreset_probe,
+       .remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+       return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+       return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
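
Editor's note (not part of the patch): keyreset is configured through
struct keyreset_platform_data from include/linux/keyreset.h, which is not
shown in this hunk. A hypothetical board-file sketch, using only the fields
keyreset_probe() above reads (keys_down, keys_up, reset_fn, key_down_delay)
and assuming keys_down is declared as a 0-terminated array member, as older
Android board files initialised it; all example_* names are invented:

        /* Hold volume-down + power for key_down_delay ms to trigger orderly_reboot(). */
        static struct keyreset_platform_data example_keyreset_pdata = {
                .keys_down = {
                        KEY_VOLUMEDOWN,
                        KEY_POWER,
                        0,              /* terminator expected by keyreset_probe() */
                },
                .key_down_delay = 3000, /* forwarded to keycombo (milliseconds) */
        };

        static struct platform_device example_keyreset_device = {
                .name = KEYRESET_NAME,
                .id   = -1,
                .dev  = {
                        .platform_data = &example_keyreset_pdata,
                },
        };

        /* Registered from board init with platform_device_register(&example_keyreset_device). */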
index d6d16fa782815481e04609771b09b32e862ce679..25ac47b9a1806e2d04066b57cfb806b4e0d5c3b9 100644 (file)
@@ -341,6 +341,17 @@ config INPUT_ATI_REMOTE2
          To compile this driver as a module, choose M here: the module will be
          called ati_remote2.
 
+config INPUT_KEYCHORD
+       tristate "Key chord input driver support"
+       help
+         Say Y here if you want to enable the key chord driver
+         accessible at /dev/keychord.  This driver can be used
+         for receiving notifications when client specified key
+         combinations are pressed.
+
+         To compile this driver as a module, choose M here: the
+         module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
        tristate "Keyspan DMR USB remote control"
        depends on USB_ARCH_HAS_HCD
@@ -509,6 +520,11 @@ config INPUT_SGI_BTNS
          To compile this driver as a module, choose M here: the
          module will be called sgi_btns.
 
+config INPUT_GPIO
+       tristate "GPIO driver support"
+       help
+         Say Y here if you want to support GPIO-based keys, wheels, etc.
+
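The gpio_event framework selected here is configured entirely from board
code: a platform device named GPIO_EVENT_DEV_NAME carries a
gpio_event_platform_data whose info array points at matrix, direct-input,
output and axis descriptors (the gpio_* files added below).  A rough
board-file sketch, assuming hypothetical GPIO numbers and keycodes:

/* Illustrative board wiring for gpio_event (hypothetical GPIOs and keys). */
#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct gpio_event_direct_entry demo_keymap[] = {
	{ .gpio = 42, .code = KEY_VOLUMEUP },
	{ .gpio = 43, .code = KEY_VOLUMEDOWN },
};

static struct gpio_event_input_info demo_key_info = {
	.info.func = gpio_event_input_func,
	.flags = GPIOEDF_PRINT_KEYS,
	.type = EV_KEY,
	.keymap = demo_keymap,
	.keymap_size = ARRAY_SIZE(demo_keymap),
};

static struct gpio_event_info *demo_info[] = {
	&demo_key_info.info,
};

static struct gpio_event_platform_data demo_event_data = {
	.name = "demo-keys",
	.info = demo_info,
	.info_count = ARRAY_SIZE(demo_info),
};

static struct platform_device demo_event_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.id = 0,
	.dev.platform_data = &demo_event_data,
};

Board init code would then call platform_device_register(&demo_event_device);
gpio_event_probe() creates the input device(s) named in the platform data and
hands each descriptor its GPIO_EVENT_FUNC_INIT call.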
 config HP_SDC_RTC
        tristate "HP SDC Real Time Clock"
        depends on (GSC || HP300) && SERIO
index 0357a088c6a9e2e132ef1d24d44dce7ace24b554..66c3cc9f181cb390089af9f13f825ad2ded1f1ac 100644 (file)
@@ -34,9 +34,11 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS)  += drv2667.o
 obj-$(CONFIG_INPUT_GP2A)               += gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_BEEPER)                += gpio-beeper.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)   += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO)               += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
 obj-$(CONFIG_HP_SDC_RTC)               += hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)            += ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)      += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)           += keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)     += keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)              += kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)          += m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644 (file)
index 0000000..0acf4a5
--- /dev/null
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+       struct gpio_event_input_devs *input_devs;
+       struct gpio_event_axis_info *info;
+       uint32_t pos;
+};
+
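+/* Map a 4-bit Gray-coded input state to its sequential position (0-15). */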
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+       [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+       [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+       [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+       [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+       [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+       [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+       [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+       [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+       return gpio_axis_4bit_gray_map_table[in];
+}
+
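+/* Map a 5-bit single-track encoder state to its sequential position (0-29). */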
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+       [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+       [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+       [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+       [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+       [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+       [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+       [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+       [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+       [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+       [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+       struct gpio_event_axis_info *info, uint16_t in)
+{
+       return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+       struct gpio_event_axis_info *ai = as->info;
+       int i;
+       int change;
+       uint16_t state = 0;
+       uint16_t pos;
+       uint16_t old_pos = as->pos;
+       for (i = ai->count - 1; i >= 0; i--)
+               state = (state << 1) | gpio_get_value(ai->gpio[i]);
+       pos = ai->map(ai, state);
+       if (ai->flags & GPIOEAF_PRINT_RAW)
+               pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+                       ai->type, ai->code, state, old_pos, pos);
+       if (report && pos != old_pos) {
+               if (ai->type == EV_REL) {
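+                       /*
+                        * Relative axis: report the shortest signed step from
+                        * old_pos to pos, wrapping modulo decoded_size.
+                        */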
+                       change = (ai->decoded_size + pos - old_pos) %
+                                 ai->decoded_size;
+                       if (change > ai->decoded_size / 2)
+                               change -= ai->decoded_size;
+                       if (change == ai->decoded_size / 2) {
+                               if (ai->flags & GPIOEAF_PRINT_EVENT)
+                                       pr_info("axis %d-%d unknown direction, "
+                                               "pos %d -> %d\n", ai->type,
+                                               ai->code, old_pos, pos);
+                               change = 0; /* no closest direction */
+                       }
+                       if (ai->flags & GPIOEAF_PRINT_EVENT)
+                               pr_info("axis %d-%d change %d\n",
+                                       ai->type, ai->code, change);
+                       input_report_rel(as->input_devs->dev[ai->dev],
+                                               ai->code, change);
+               } else {
+                       if (ai->flags & GPIOEAF_PRINT_EVENT)
+                               pr_info("axis %d-%d now %d\n",
+                                       ai->type, ai->code, pos);
+                       input_event(as->input_devs->dev[ai->dev],
+                                       ai->type, ai->code, pos);
+               }
+               input_sync(as->input_devs->dev[ai->dev]);
+       }
+       as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_axis_state *as = dev_id;
+       gpio_event_update_axis(as, 1);
+       return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+                        struct gpio_event_info *info, void **data, int func)
+{
+       int ret;
+       int i;
+       int irq;
+       struct gpio_event_axis_info *ai;
+       struct gpio_axis_state *as;
+
+       ai = container_of(info, struct gpio_event_axis_info, info);
+       if (func == GPIO_EVENT_FUNC_SUSPEND) {
+               for (i = 0; i < ai->count; i++)
+                       disable_irq(gpio_to_irq(ai->gpio[i]));
+               return 0;
+       }
+       if (func == GPIO_EVENT_FUNC_RESUME) {
+               for (i = 0; i < ai->count; i++)
+                       enable_irq(gpio_to_irq(ai->gpio[i]));
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               *data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+               if (as == NULL) {
+                       ret = -ENOMEM;
+                       goto err_alloc_axis_state_failed;
+               }
+               as->input_devs = input_devs;
+               as->info = ai;
+               if (ai->dev >= input_devs->count) {
+                       pr_err("gpio_event_axis: bad device index %d >= %d "
+                               "for %d:%d\n", ai->dev, input_devs->count,
+                               ai->type, ai->code);
+                       ret = -EINVAL;
+                       goto err_bad_device_index;
+               }
+
+               input_set_capability(input_devs->dev[ai->dev],
+                                    ai->type, ai->code);
+               if (ai->type == EV_ABS) {
+                       input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+                                            0, ai->decoded_size - 1, 0, 0);
+               }
+               for (i = 0; i < ai->count; i++) {
+                       ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+                       if (ret < 0)
+                               goto err_request_gpio_failed;
+                       ret = gpio_direction_input(ai->gpio[i]);
+                       if (ret < 0)
+                               goto err_gpio_direction_input_failed;
+                       ret = irq = gpio_to_irq(ai->gpio[i]);
+                       if (ret < 0)
+                               goto err_get_irq_num_failed;
+                       ret = request_irq(irq, gpio_axis_irq_handler,
+                                         IRQF_TRIGGER_RISING |
+                                         IRQF_TRIGGER_FALLING,
+                                         "gpio_event_axis", as);
+                       if (ret < 0)
+                               goto err_request_irq_failed;
+               }
+               gpio_event_update_axis(as, 0);
+               return 0;
+       }
+
+       ret = 0;
+       as = *data;
+       for (i = ai->count - 1; i >= 0; i--) {
+               free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+               gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+               ;
+       }
+err_bad_device_index:
+       kfree(as);
+       *data = NULL;
+err_alloc_axis_state_failed:
+       return ret;
+}
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644 (file)
index 0000000..90f07eb
--- /dev/null
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+       struct gpio_event_input_devs *input_devs;
+       const struct gpio_event_platform_data *info;
+       void *state[0];
+};
+
+static int gpio_input_event(
+       struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+       int i;
+       int devnr;
+       int ret = 0;
+       int tmp_ret;
+       struct gpio_event_info **ii;
+       struct gpio_event *ip = input_get_drvdata(dev);
+
+       for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+               if (ip->input_devs->dev[devnr] == dev)
+                       break;
+       if (devnr == ip->input_devs->count) {
+               pr_err("gpio_input_event: unknown device %p\n", dev);
+               return -EIO;
+       }
+
+       for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+               if ((*ii)->event) {
+                       tmp_ret = (*ii)->event(ip->input_devs, *ii,
+                                               &ip->state[i],
+                                               devnr, type, code, value);
+                       if (tmp_ret)
+                               ret = tmp_ret;
+               }
+       }
+       return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+       int i;
+       int ret;
+       struct gpio_event_info **ii;
+
+       if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+               ii = ip->info->info;
+               for (i = 0; i < ip->info->info_count; i++, ii++) {
+                       if ((*ii)->func == NULL) {
+                               ret = -ENODEV;
+                               pr_err("gpio_event_probe: Incomplete pdata, "
+                                       "no function\n");
+                               goto err_no_func;
+                       }
+                       if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+                               continue;
+                       ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+                                         func);
+                       if (ret) {
+                               pr_err("gpio_event_probe: function failed\n");
+                               goto err_func_failed;
+                       }
+               }
+               return 0;
+       }
+
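+       /*
+        * UNINIT/SUSPEND (and the error unwind from the loop above) land
+        * here: walk the entries in reverse and give each the matching
+        * teardown call; clearing bit 0 of func turns INIT into UNINIT and
+        * RESUME into SUSPEND.
+        */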
+       ret = 0;
+       i = ip->info->info_count;
+       ii = ip->info->info + i;
+       while (i > 0) {
+               i--;
+               ii--;
+               if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+                       continue;
+               (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+               ;
+       }
+       return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+       if (ip->info->power)
+               ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+       if (ip->info->power)
+               ip->info->power(ip->info, 1);
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+       int err;
+       struct gpio_event *ip;
+       struct gpio_event_platform_data *event_info;
+       int dev_count = 1;
+       int i;
+       int registered = 0;
+
+       event_info = pdev->dev.platform_data;
+       if (event_info == NULL) {
+               pr_err("gpio_event_probe: No pdata\n");
+               return -ENODEV;
+       }
+       if ((!event_info->name && !event_info->names[0]) ||
+           !event_info->info || !event_info->info_count) {
+               pr_err("gpio_event_probe: Incomplete pdata\n");
+               return -ENODEV;
+       }
+       if (!event_info->name)
+               while (event_info->names[dev_count])
+                       dev_count++;
+       ip = kzalloc(sizeof(*ip) +
+                    sizeof(ip->state[0]) * event_info->info_count +
+                    sizeof(*ip->input_devs) +
+                    sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+       if (ip == NULL) {
+               err = -ENOMEM;
+               pr_err("gpio_event_probe: Failed to allocate private data\n");
+               goto err_kp_alloc_failed;
+       }
+       ip->input_devs = (void*)&ip->state[event_info->info_count];
+       platform_set_drvdata(pdev, ip);
+
+       for (i = 0; i < dev_count; i++) {
+               struct input_dev *input_dev = input_allocate_device();
+               if (input_dev == NULL) {
+                       err = -ENOMEM;
+                       pr_err("gpio_event_probe: "
+                               "Failed to allocate input device\n");
+                       goto err_input_dev_alloc_failed;
+               }
+               input_set_drvdata(input_dev, ip);
+               input_dev->name = event_info->name ?
+                                       event_info->name : event_info->names[i];
+               input_dev->event = gpio_input_event;
+               ip->input_devs->dev[i] = input_dev;
+       }
+       ip->input_devs->count = dev_count;
+       ip->info = event_info;
+       if (event_info->power)
+               ip->info->power(ip->info, 1);
+
+       err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+       if (err)
+               goto err_call_all_func_failed;
+
+       for (i = 0; i < dev_count; i++) {
+               err = input_register_device(ip->input_devs->dev[i]);
+               if (err) {
+                       pr_err("gpio_event_probe: Unable to register %s "
+                               "input device\n", ip->input_devs->dev[i]->name);
+                       goto err_input_register_device_failed;
+               }
+               registered++;
+       }
+
+       return 0;
+
+err_input_register_device_failed:
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+       if (event_info->power)
+               ip->info->power(ip->info, 0);
+       for (i = 0; i < registered; i++)
+               input_unregister_device(ip->input_devs->dev[i]);
+       for (i = dev_count - 1; i >= registered; i--) {
+               input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+               ;
+       }
+       kfree(ip);
+err_kp_alloc_failed:
+       return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+       struct gpio_event *ip = platform_get_drvdata(pdev);
+       int i;
+
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+       if (ip->info->power)
+               ip->info->power(ip->info, 0);
+       for (i = 0; i < ip->input_devs->count; i++)
+               input_unregister_device(ip->input_devs->dev[i]);
+       kfree(ip);
+       return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+       .probe          = gpio_event_probe,
+       .remove         = gpio_event_remove,
+       .driver         = {
+               .name   = GPIO_EVENT_DEV_NAME,
+       },
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644 (file)
index 0000000..eefd027
--- /dev/null
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+       DEBOUNCE_UNSTABLE     = BIT(0), /* Got irq, while debouncing */
+       DEBOUNCE_PRESSED      = BIT(1),
+       DEBOUNCE_NOTPRESSED   = BIT(2),
+       DEBOUNCE_WAIT_IRQ     = BIT(3), /* Stable irq state */
+       DEBOUNCE_POLL         = BIT(4), /* Stable polling state */
+
+       DEBOUNCE_UNKNOWN =
+               DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+       struct gpio_input_state *ds;
+       uint8_t debounce;
+};
+
+struct gpio_input_state {
+       struct gpio_event_input_devs *input_devs;
+       const struct gpio_event_input_info *info;
+       struct hrtimer timer;
+       int use_irq;
+       int debounce_count;
+       spinlock_t irq_lock;
+       struct wakeup_source *ws;
+       struct gpio_key_state key_state[0];
+};
+
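+/*
+ * Debounce/poll timer: every key not parked in a stable WAIT_IRQ state is
+ * sampled here.  Keys move UNKNOWN -> PRESSED/NOTPRESSED -> WAIT_IRQ (irq
+ * mode) or POLL (polling mode); the timer rearms with debounce_time while
+ * any key is still settling, and with poll_time when running without irqs.
+ */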
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+       int i;
+       int pressed;
+       struct gpio_input_state *ds =
+               container_of(timer, struct gpio_input_state, timer);
+       unsigned gpio_flags = ds->info->flags;
+       unsigned npolarity;
+       int nkeys = ds->info->keymap_size;
+       const struct gpio_event_direct_entry *key_entry;
+       struct gpio_key_state *key_state;
+       unsigned long irqflags;
+       uint8_t debounce;
+       bool sync_needed;
+
+#if 0
+       key_entry = kp->keys_info->keymap;
+       key_state = kp->key_state;
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+               pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+                       gpio_read_detect_status(key_entry->gpio));
+#endif
+       key_entry = ds->info->keymap;
+       key_state = ds->key_state;
+       sync_needed = false;
+       spin_lock_irqsave(&ds->irq_lock, irqflags);
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+               debounce = key_state->debounce;
+               if (debounce & DEBOUNCE_WAIT_IRQ)
+                       continue;
+               if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+                       debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+                       enable_irq(gpio_to_irq(key_entry->gpio));
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) continue debounce\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+               }
+               npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+               pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+               if (debounce & DEBOUNCE_POLL) {
+                       if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+                               ds->debounce_count++;
+                               key_state->debounce = DEBOUNCE_UNKNOWN;
+                               if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                                       pr_info("gpio_keys_scan_keys: key %x-"
+                                               "%x, %d (%d) start debounce\n",
+                                               ds->info->type, key_entry->code,
+                                               i, key_entry->gpio);
+                       }
+                       continue;
+               }
+               if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) debounce pressed 1\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+                       key_state->debounce = DEBOUNCE_PRESSED;
+                       continue;
+               }
+               if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) debounce pressed 0\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+                       key_state->debounce = DEBOUNCE_NOTPRESSED;
+                       continue;
+               }
+               /* key is stable */
+               ds->debounce_count--;
+               if (ds->use_irq)
+                       key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+               else
+                       key_state->debounce |= DEBOUNCE_POLL;
+               if (gpio_flags & GPIOEDF_PRINT_KEYS)
+                       pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+                               "changed to %d\n", ds->info->type,
+                               key_entry->code, i, key_entry->gpio, pressed);
+               input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+                           key_entry->code, pressed);
+               sync_needed = true;
+       }
+       if (sync_needed) {
+               for (i = 0; i < ds->input_devs->count; i++)
+                       input_sync(ds->input_devs->dev[i]);
+       }
+
+#if 0
+       key_entry = kp->keys_info->keymap;
+       key_state = kp->key_state;
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+               pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+                       gpio_read_detect_status(key_entry->gpio));
+       }
+#endif
+
+       if (ds->debounce_count)
+               hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+       else if (!ds->use_irq)
+               hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+       else
+               __pm_relax(ds->ws);
+
+       spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+       return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_key_state *ks = dev_id;
+       struct gpio_input_state *ds = ks->ds;
+       int keymap_index = ks - ds->key_state;
+       const struct gpio_event_direct_entry *key_entry;
+       unsigned long irqflags;
+       int pressed;
+
+       if (!ds->use_irq)
+               return IRQ_HANDLED;
+
+       key_entry = &ds->info->keymap[keymap_index];
+
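+       /* with debouncing enabled the timer does the reporting; otherwise
+        * report the new key state directly from the interrupt */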
+       if (ds->info->debounce_time.tv64) {
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+                       ks->debounce = DEBOUNCE_UNKNOWN;
+                       if (ds->debounce_count++ == 0) {
+                               __pm_stay_awake(ds->ws);
+                               hrtimer_start(
+                                       &ds->timer, ds->info->debounce_time,
+                                       HRTIMER_MODE_REL);
+                       }
+                       if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_event_input_irq_handler: "
+                                       "key %x-%x, %d (%d) start debounce\n",
+                                       ds->info->type, key_entry->code,
+                                       keymap_index, key_entry->gpio);
+               } else {
+                       disable_irq_nosync(irq);
+                       ks->debounce = DEBOUNCE_UNSTABLE;
+               }
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+       } else {
+               pressed = gpio_get_value(key_entry->gpio) ^
+                       !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+               if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+                       pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+                               "(%d) changed to %d\n",
+                               ds->info->type, key_entry->code, keymap_index,
+                               key_entry->gpio, pressed);
+               input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+                           key_entry->code, pressed);
+               input_sync(ds->input_devs->dev[key_entry->dev]);
+       }
+       return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+       int i;
+       int err;
+       unsigned int irq;
+       unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+       for (i = 0; i < ds->info->keymap_size; i++) {
+               err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+               if (err < 0)
+                       goto err_gpio_get_irq_num_failed;
+               err = request_irq(irq, gpio_event_input_irq_handler,
+                                 req_flags, "gpio_keys", &ds->key_state[i]);
+               if (err) {
+                       pr_err("gpio_event_input_request_irqs: request_irq "
+                               "failed for input %d, irq %d\n",
+                               ds->info->keymap[i].gpio, irq);
+                       goto err_request_irq_failed;
+               }
+               if (ds->info->info.no_suspend) {
+                       err = enable_irq_wake(irq);
+                       if (err) {
+                               pr_err("gpio_event_input_request_irqs: "
+                                       "enable_irq_wake failed for input %d, "
+                                       "irq %d\n",
+                                       ds->info->keymap[i].gpio, irq);
+                               goto err_enable_irq_wake_failed;
+                       }
+               }
+       }
+       return 0;
+
+       for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+               irq = gpio_to_irq(ds->info->keymap[i].gpio);
+               if (ds->info->info.no_suspend)
+                       disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+               free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+               ;
+       }
+       return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func)
+{
+       int ret;
+       int i;
+       unsigned long irqflags;
+       struct gpio_event_input_info *di;
+       struct gpio_input_state *ds = *data;
+       char *wlname;
+
+       di = container_of(info, struct gpio_event_input_info, info);
+
+       if (func == GPIO_EVENT_FUNC_SUSPEND) {
+               if (ds->use_irq)
+                       for (i = 0; i < di->keymap_size; i++)
+                               disable_irq(gpio_to_irq(di->keymap[i].gpio));
+               hrtimer_cancel(&ds->timer);
+               return 0;
+       }
+       if (func == GPIO_EVENT_FUNC_RESUME) {
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               if (ds->use_irq)
+                       for (i = 0; i < di->keymap_size; i++)
+                               enable_irq(gpio_to_irq(di->keymap[i].gpio));
+               hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               if (ktime_to_ns(di->poll_time) <= 0)
+                       di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+               *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+                                       di->keymap_size, GFP_KERNEL);
+               if (ds == NULL) {
+                       ret = -ENOMEM;
+                       pr_err("gpio_event_input_func: "
+                               "Failed to allocate private data\n");
+                       goto err_ds_alloc_failed;
+               }
+               ds->debounce_count = di->keymap_size;
+               ds->input_devs = input_devs;
+               ds->info = di;
+               wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+                                  input_devs->dev[0]->name,
+                                  (input_devs->count > 1) ? "..." : "");
+
+               ds->ws = wakeup_source_register(wlname);
+               kfree(wlname);
+               if (!ds->ws) {
+                       ret = -ENOMEM;
+                       pr_err("gpio_event_input_func: "
+                               "Failed to allocate wakeup source\n");
+                       goto err_ws_failed;
+               }
+
+               spin_lock_init(&ds->irq_lock);
+
+               for (i = 0; i < di->keymap_size; i++) {
+                       int dev = di->keymap[i].dev;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpio_event_input_func: bad device "
+                                       "index %d >= %d for key code %d\n",
+                                       dev, input_devs->count,
+                                       di->keymap[i].code);
+                               ret = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       input_set_capability(input_devs->dev[dev], di->type,
+                                            di->keymap[i].code);
+                       ds->key_state[i].ds = ds;
+                       ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+               }
+
+               for (i = 0; i < di->keymap_size; i++) {
+                       ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+                       if (ret) {
+                               pr_err("gpio_event_input_func: gpio_request "
+                                       "failed for %d\n", di->keymap[i].gpio);
+                               goto err_gpio_request_failed;
+                       }
+                       ret = gpio_direction_input(di->keymap[i].gpio);
+                       if (ret) {
+                               pr_err("gpio_event_input_func: "
+                                       "gpio_direction_input failed for %d\n",
+                                       di->keymap[i].gpio);
+                               goto err_gpio_configure_failed;
+                       }
+               }
+
+               ret = gpio_event_input_request_irqs(ds);
+
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               ds->use_irq = ret == 0;
+
+               pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+                       "mode\n", input_devs->dev[0]->name,
+                       (input_devs->count > 1) ? "..." : "",
+                       ret == 0 ? "interrupt" : "polling");
+
+               hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               ds->timer.function = gpio_event_input_timer_func;
+               hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+               return 0;
+       }
+
+       ret = 0;
+       spin_lock_irqsave(&ds->irq_lock, irqflags);
+       hrtimer_cancel(&ds->timer);
+       if (ds->use_irq) {
+               for (i = di->keymap_size - 1; i >= 0; i--) {
+                       int irq = gpio_to_irq(di->keymap[i].gpio);
+                       if (ds->info->info.no_suspend)
+                               disable_irq_wake(irq);
+                       free_irq(irq, &ds->key_state[i]);
+               }
+       }
+       spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+       for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+               gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+               ;
+       }
+err_bad_keymap:
+       wakeup_source_unregister(ds->ws);
+err_ws_failed:
+       kfree(ds);
+err_ds_alloc_failed:
+       return ret;
+}
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644 (file)
index 0000000..eaa9e89
--- /dev/null
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+       struct gpio_event_input_devs *input_devs;
+       struct gpio_event_matrix_info *keypad_info;
+       struct hrtimer timer;
+       struct wake_lock wake_lock;
+       int current_output;
+       unsigned int use_irq:1;
+       unsigned int key_state_changed:1;
+       unsigned int last_key_state_changed:1;
+       unsigned int some_keys_pressed:2;
+       unsigned int disabled_irq:1;
+       unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       int key_index = out * mi->ninputs + in;
+       unsigned short keyentry = mi->keymap[key_index];
+       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+       if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+               if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+                       pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+                               "cleared\n", keycode, out, in,
+                               mi->output_gpios[out], mi->input_gpios[in]);
+               __clear_bit(key_index, kp->keys_pressed);
+       } else {
+               if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+                       pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+                               "not cleared\n", keycode, out, in,
+                               mi->output_gpios[out], mi->input_gpios[in]);
+       }
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+       int rv = 0;
+       int key_index;
+
+       key_index = out * kp->keypad_info->ninputs + in;
+       while (out < kp->keypad_info->noutputs) {
+               if (test_bit(key_index, kp->keys_pressed)) {
+                       rv = 1;
+                       clear_phantom_key(kp, out, in);
+               }
+               key_index += kp->keypad_info->ninputs;
+               out++;
+       }
+       return rv;
+}
+
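+/*
+ * With three or more keys down, a diode-less matrix can report "ghost"
+ * presses at the fourth corner of a rectangle of closed switches; clear
+ * the pressed bits that cannot be told apart from such ghosts.
+ */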
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+       int out, in, inp;
+       int key_index;
+
+       if (kp->some_keys_pressed < 3)
+               return;
+
+       for (out = 0; out < kp->keypad_info->noutputs; out++) {
+               inp = -1;
+               key_index = out * kp->keypad_info->ninputs;
+               for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+                       if (test_bit(key_index, kp->keys_pressed)) {
+                               if (inp == -1) {
+                                       inp = in;
+                                       continue;
+                               }
+                               if (inp >= 0) {
+                                       if (!restore_keys_for_input(kp, out + 1,
+                                                                       inp))
+                                               break;
+                                       clear_phantom_key(kp, out, inp);
+                                       inp = -2;
+                               }
+                               restore_keys_for_input(kp, out, in);
+                       }
+               }
+       }
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       int pressed = test_bit(key_index, kp->keys_pressed);
+       unsigned short keyentry = mi->keymap[key_index];
+       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+       if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+               if (keycode == KEY_RESERVED) {
+                       if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+                               pr_info("gpiomatrix: unmapped key, %d-%d "
+                                       "(%d-%d) changed to %d\n",
+                                       out, in, mi->output_gpios[out],
+                                       mi->input_gpios[in], pressed);
+               } else {
+                       if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+                               pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+                                       "changed to %d\n", keycode,
+                                       out, in, mi->output_gpios[out],
+                                       mi->input_gpios[in], pressed);
+                       input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+               }
+       }
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+       int i;
+
+       for (i = 0; i < kp->input_devs->count; i++)
+               input_sync(kp->input_devs->dev[i]);
+}
+
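+/*
+ * Matrix scan: each timer pass samples all inputs for the currently driven
+ * output, then drives the next output and rearms after settle_time.  Once
+ * every output has been scanned, debouncing and key reporting run, and the
+ * keypad either keeps polling (keys still down) or re-enables the input
+ * interrupts and releases the wake lock.
+ */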
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+       int out, in;
+       int key_index;
+       int gpio;
+       struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       unsigned gpio_keypad_flags = mi->flags;
+       unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+       out = kp->current_output;
+       if (out == mi->noutputs) {
+               out = 0;
+               kp->last_key_state_changed = kp->key_state_changed;
+               kp->key_state_changed = 0;
+               kp->some_keys_pressed = 0;
+       } else {
+               key_index = out * mi->ninputs;
+               for (in = 0; in < mi->ninputs; in++, key_index++) {
+                       gpio = mi->input_gpios[in];
+                       if (gpio_get_value(gpio) ^ !polarity) {
+                               if (kp->some_keys_pressed < 3)
+                                       kp->some_keys_pressed++;
+                               kp->key_state_changed |= !__test_and_set_bit(
+                                               key_index, kp->keys_pressed);
+                       } else
+                               kp->key_state_changed |= __test_and_clear_bit(
+                                               key_index, kp->keys_pressed);
+               }
+               gpio = mi->output_gpios[out];
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(gpio, !polarity);
+               else
+                       gpio_direction_input(gpio);
+               out++;
+       }
+       kp->current_output = out;
+       if (out < mi->noutputs) {
+               gpio = mi->output_gpios[out];
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(gpio, polarity);
+               else
+                       gpio_direction_output(gpio, polarity);
+               hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+               return HRTIMER_NORESTART;
+       }
+       if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+               if (kp->key_state_changed) {
+                       hrtimer_start(&kp->timer, mi->debounce_delay,
+                                     HRTIMER_MODE_REL);
+                       return HRTIMER_NORESTART;
+               }
+               kp->key_state_changed = kp->last_key_state_changed;
+       }
+       if (kp->key_state_changed) {
+               if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+                       remove_phantom_keys(kp);
+               key_index = 0;
+               for (out = 0; out < mi->noutputs; out++)
+                       for (in = 0; in < mi->ninputs; in++, key_index++)
+                               report_key(kp, key_index, out, in);
+               report_sync(kp);
+       }
+       if (!kp->use_irq || kp->some_keys_pressed) {
+               hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+               return HRTIMER_NORESTART;
+       }
+
+       /* No keys are pressed, reenable interrupt */
+       for (out = 0; out < mi->noutputs; out++) {
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(mi->output_gpios[out], polarity);
+               else
+                       gpio_direction_output(mi->output_gpios[out], polarity);
+       }
+       for (in = 0; in < mi->ninputs; in++)
+               enable_irq(gpio_to_irq(mi->input_gpios[in]));
+       wake_unlock(&kp->wake_lock);
+       return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+       int i;
+       struct gpio_kp *kp = dev_id;
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       unsigned gpio_keypad_flags = mi->flags;
+
+       if (!kp->use_irq) {
+               /* ignore interrupt while registering the handler */
+               kp->disabled_irq = 1;
+               disable_irq_nosync(irq_in);
+               return IRQ_HANDLED;
+       }
+
+       for (i = 0; i < mi->ninputs; i++)
+               disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+       for (i = 0; i < mi->noutputs; i++) {
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(mi->output_gpios[i],
+                               !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+               else
+                       gpio_direction_input(mi->output_gpios[i]);
+       }
+       wake_lock(&kp->wake_lock);
+       hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+       return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+       int i;
+       int err;
+       unsigned int irq;
+       unsigned long request_flags;
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+       switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+       default:
+               request_flags = IRQF_TRIGGER_FALLING;
+               break;
+       case GPIOKPF_ACTIVE_HIGH:
+               request_flags = IRQF_TRIGGER_RISING;
+               break;
+       case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+               request_flags = IRQF_TRIGGER_LOW;
+               break;
+       case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+               request_flags = IRQF_TRIGGER_HIGH;
+               break;
+       }
+
+       for (i = 0; i < mi->ninputs; i++) {
+               err = irq = gpio_to_irq(mi->input_gpios[i]);
+               if (err < 0)
+                       goto err_gpio_get_irq_num_failed;
+               err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+                                 "gpio_kp", kp);
+               if (err) {
+                       pr_err("gpiomatrix: request_irq failed for input %d, "
+                               "irq %d\n", mi->input_gpios[i], irq);
+                       goto err_request_irq_failed;
+               }
+               err = enable_irq_wake(irq);
+               if (err) {
+                       pr_err("gpiomatrix: set_irq_wake failed for input %d, "
+                               "irq %d\n", mi->input_gpios[i], irq);
+               }
+               disable_irq(irq);
+               if (kp->disabled_irq) {
+                       kp->disabled_irq = 0;
+                       enable_irq(irq);
+               }
+       }
+       return 0;
+
+       for (i = mi->ninputs - 1; i >= 0; i--) {
+               free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+               ;
+       }
+       return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+       struct gpio_event_info *info, void **data, int func)
+{
+       int i;
+       int err;
+       int key_count;
+       struct gpio_kp *kp;
+       struct gpio_event_matrix_info *mi;
+
+       mi = container_of(info, struct gpio_event_matrix_info, info);
+       if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+               /* TODO: disable scanning */
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               if (mi->keymap == NULL ||
+                  mi->input_gpios == NULL ||
+                  mi->output_gpios == NULL) {
+                       err = -ENODEV;
+                       pr_err("gpiomatrix: Incomplete pdata\n");
+                       goto err_invalid_platform_data;
+               }
+               key_count = mi->ninputs * mi->noutputs;
+
+               *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+                                    BITS_TO_LONGS(key_count), GFP_KERNEL);
+               if (kp == NULL) {
+                       err = -ENOMEM;
+                       pr_err("gpiomatrix: Failed to allocate private data\n");
+                       goto err_kp_alloc_failed;
+               }
+               kp->input_devs = input_devs;
+               kp->keypad_info = mi;
+               for (i = 0; i < key_count; i++) {
+                       unsigned short keyentry = mi->keymap[i];
+                       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+                       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpiomatrix: bad device index %d >= "
+                                       "%d for key code %d\n",
+                                       dev, input_devs->count, keycode);
+                               err = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       if (keycode && keycode <= KEY_MAX)
+                               input_set_capability(input_devs->dev[dev],
+                                                       EV_KEY, keycode);
+               }
+
+               for (i = 0; i < mi->noutputs; i++) {
+                       err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_request failed for "
+                                       "output %d\n", mi->output_gpios[i]);
+                               goto err_request_output_gpio_failed;
+                       }
+                       if (gpio_cansleep(mi->output_gpios[i])) {
+                               pr_err("gpiomatrix: unsupported output gpio %d,"
+                                       " can sleep\n", mi->output_gpios[i]);
+                               err = -EINVAL;
+                               goto err_output_gpio_configure_failed;
+                       }
+                       if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+                               err = gpio_direction_output(mi->output_gpios[i],
+                                       !(mi->flags & GPIOKPF_ACTIVE_HIGH));
+                       else
+                               err = gpio_direction_input(mi->output_gpios[i]);
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_configure failed for "
+                                       "output %d\n", mi->output_gpios[i]);
+                               goto err_output_gpio_configure_failed;
+                       }
+               }
+               for (i = 0; i < mi->ninputs; i++) {
+                       err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_request failed for "
+                                       "input %d\n", mi->input_gpios[i]);
+                               goto err_request_input_gpio_failed;
+                       }
+                       err = gpio_direction_input(mi->input_gpios[i]);
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_direction_input failed"
+                                       " for input %d\n", mi->input_gpios[i]);
+                               goto err_gpio_direction_input_failed;
+                       }
+               }
+               kp->current_output = mi->noutputs;
+               kp->key_state_changed = 1;
+
+               hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               kp->timer.function = gpio_keypad_timer_func;
+               wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+               err = gpio_keypad_request_irqs(kp);
+               kp->use_irq = err == 0;
+
+               pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+                       "%s%s in %s mode\n", input_devs->dev[0]->name,
+                       (input_devs->count > 1) ? "..." : "",
+                       kp->use_irq ? "interrupt" : "polling");
+
+               if (kp->use_irq)
+                       wake_lock(&kp->wake_lock);
+               hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+               return 0;
+       }
+
+       err = 0;
+       kp = *data;
+
+       if (kp->use_irq)
+               for (i = mi->ninputs - 1; i >= 0; i--)
+                       free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+       hrtimer_cancel(&kp->timer);
+       wake_lock_destroy(&kp->wake_lock);
+       for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+               gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+               ;
+       }
+       for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+               gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+               ;
+       }
+err_bad_keymap:
+       kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+       return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644 (file)
index 0000000..2aac2fa
--- /dev/null
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+       struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+       void **data, unsigned int dev, unsigned int type,
+       unsigned int code, int value)
+{
+       int i;
+       struct gpio_event_output_info *oi;
+       oi = container_of(info, struct gpio_event_output_info, info);
+       if (type != oi->type)
+               return 0;
+       if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+               value = !value;
+       for (i = 0; i < oi->keymap_size; i++)
+               if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+                       gpio_set_value(oi->keymap[i].gpio, value);
+       return 0;
+}
+
+int gpio_event_output_func(
+       struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+       void **data, int func)
+{
+       int ret;
+       int i;
+       struct gpio_event_output_info *oi;
+       oi = container_of(info, struct gpio_event_output_info, info);
+
+       if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+               return 0;
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+               for (i = 0; i < oi->keymap_size; i++) {
+                       int dev = oi->keymap[i].dev;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpio_event_output_func: bad device "
+                                       "index %d >= %d for key code %d\n",
+                                       dev, input_devs->count,
+                                       oi->keymap[i].code);
+                               ret = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       input_set_capability(input_devs->dev[dev], oi->type,
+                                            oi->keymap[i].code);
+               }
+
+               for (i = 0; i < oi->keymap_size; i++) {
+                       ret = gpio_request(oi->keymap[i].gpio,
+                                          "gpio_event_output");
+                       if (ret) {
+                               pr_err("gpio_event_output_func: gpio_request "
+                                       "failed for %d\n", oi->keymap[i].gpio);
+                               goto err_gpio_request_failed;
+                       }
+                       ret = gpio_direction_output(oi->keymap[i].gpio,
+                                                   output_level);
+                       if (ret) {
+                               pr_err("gpio_event_output_func: "
+                                       "gpio_direction_output failed for %d\n",
+                                       oi->keymap[i].gpio);
+                               goto err_gpio_direction_output_failed;
+                       }
+               }
+               return 0;
+       }
+
+       ret = 0;
+       for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+               gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+               ;
+       }
+err_bad_keymap:
+       return ret;
+}
+
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644 (file)
index 0000000..a5ea27a
--- /dev/null
@@ -0,0 +1,391 @@
+/*
+ *  drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME          "keychord"
+#define BUFFER_SIZE                    16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
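+/*
+ * Keychords are variable length: each input_keychord header is followed
+ * by 'count' keycodes. NEXT_KEYCHORD steps past the current record to
+ * the next one in the packed array written from userspace.
+ */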
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+               ((char *)kc + sizeof(struct input_keychord) + \
+               kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+       struct input_handler    input_handler;
+       int                     registered;
+
+       /* list of keychords to monitor */
+       struct input_keychord   *keychords;
+       int                     keychord_count;
+
+       /* bitmask of keys contained in our keychords */
+       unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+       /* current state of the keys */
+       unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+       /* number of keys that are currently pressed */
+       int key_down;
+
+       /* second input_device_id is needed for null termination */
+       struct input_device_id  device_ids[2];
+
+       spinlock_t              lock;
+       wait_queue_head_t       waitq;
+       unsigned char           head;
+       unsigned char           tail;
+       __u16                   buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+               struct input_keychord *keychord)
+{
+       int i;
+
+       if (keychord->count != kdev->key_down)
+               return 0;
+
+       for (i = 0; i < keychord->count; i++) {
+               if (!test_bit(keychord->keycodes[i], kdev->keystate))
+                       return 0;
+       }
+
+       /* we have a match */
+       return 1;
+}
+
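+/*
+ * Input-core event callback. Tracks the up/down state of keys on the
+ * attached devices; on a key-down of a monitored key it checks all
+ * configured chords, and on a match pushes the chord id onto the
+ * circular buffer and wakes any readers.
+ */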
+static void keychord_event(struct input_handle *handle, unsigned int type,
+                          unsigned int code, int value)
+{
+       struct keychord_device *kdev = handle->private;
+       struct input_keychord *keychord;
+       unsigned long flags;
+       int i, got_chord = 0;
+
+       if (type != EV_KEY || code >= KEY_MAX)
+               return;
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* do nothing if key state did not change */
+       if (!test_bit(code, kdev->keystate) == !value)
+               goto done;
+       __change_bit(code, kdev->keystate);
+       if (value)
+               kdev->key_down++;
+       else
+               kdev->key_down--;
+
+       /* don't notify on key up */
+       if (!value)
+               goto done;
+       /* ignore this event if it is not one of the keys we are monitoring */
+       if (!test_bit(code, kdev->keybit))
+               goto done;
+
+       keychord = kdev->keychords;
+       if (!keychord)
+               goto done;
+
+       /* check to see if the keyboard state matches any keychords */
+       for (i = 0; i < kdev->keychord_count; i++) {
+               if (check_keychord(kdev, keychord)) {
+                       kdev->buff[kdev->head] = keychord->id;
+                       kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+                       got_chord = 1;
+                       break;
+               }
+               /* skip to next keychord */
+               keychord = NEXT_KEYCHORD(keychord);
+       }
+
+done:
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       if (got_chord) {
+               pr_info("keychord: got keychord id %d. Any tasks: %d\n",
+                       keychord->id,
+                       !list_empty_careful(&kdev->waitq.task_list));
+               wake_up_interruptible(&kdev->waitq);
+       }
+}
+
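+/*
+ * Called by the input core for each matching input device. Devices that
+ * cannot generate any of the monitored keycodes are skipped; otherwise a
+ * handle is registered and the device is opened so we receive its events.
+ */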
+static int keychord_connect(struct input_handler *handler,
+                                         struct input_dev *dev,
+                                         const struct input_device_id *id)
+{
+       int i, ret;
+       struct input_handle *handle;
+       struct keychord_device *kdev =
+               container_of(handler, struct keychord_device, input_handler);
+
+       /*
+        * ignore this input device if it does not contain any keycodes
+        * that we are monitoring
+        */
+       for (i = 0; i < KEY_MAX; i++) {
+               if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+                       break;
+       }
+       if (i == KEY_MAX)
+               return -ENODEV;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = KEYCHORD_NAME;
+       handle->private = kdev;
+
+       ret = input_register_handle(handle);
+       if (ret)
+               goto err_input_register_handle;
+
+       ret = input_open_device(handle);
+       if (ret)
+               goto err_input_open_device;
+
+       pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+       return 0;
+
+err_input_open_device:
+       input_unregister_handle(handle);
+err_input_register_handle:
+       kfree(handle);
+       return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver.
+ * Each read blocks (unless O_NONBLOCK is set) until one of the
+ * configured keychords fires, then returns its 16-bit id.
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+               size_t count, loff_t *ppos)
+{
+       struct keychord_device *kdev = file->private_data;
+       __u16   id;
+       int retval;
+       unsigned long flags;
+
+       if (count < sizeof(id))
+               return -EINVAL;
+       count = sizeof(id);
+
+       if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+               return -EAGAIN;
+
+       retval = wait_event_interruptible(kdev->waitq,
+                       kdev->head != kdev->tail);
+       if (retval)
+               return retval;
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* pop a keychord ID off the queue */
+       id = kdev->buff[kdev->tail];
+       kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       if (copy_to_user(buffer, &id, count))
+               return -EFAULT;
+
+       return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
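+ * with a packed array of input_keychord records, each immediately
+ * followed by its keycodes. Writing replaces any previous configuration
+ * and (re)registers the input handler. As an illustration (hypothetical
+ * keycodes), a buffer describing two chords could contain:
+ *
+ *   { .version = KEYCHORD_VERSION, .id = 1, .count = 2 }, KEY_VOLUMEDOWN, KEY_POWER,
+ *   { .version = KEYCHORD_VERSION, .id = 2, .count = 1 }, KEY_VOLUMEUP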
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+               size_t count, loff_t *ppos)
+{
+       struct keychord_device *kdev = file->private_data;
+       struct input_keychord *keychords = 0;
+       struct input_keychord *keychord, *next, *end;
+       int ret, i, key;
+       unsigned long flags;
+
+       if (count < sizeof(struct input_keychord))
+               return -EINVAL;
+       keychords = kzalloc(count, GFP_KERNEL);
+       if (!keychords)
+               return -ENOMEM;
+
+       /* read list of keychords from userspace */
+       if (copy_from_user(keychords, buffer, count)) {
+               kfree(keychords);
+               return -EFAULT;
+       }
+
+       /* unregister handler before changing configuration */
+       if (kdev->registered) {
+               input_unregister_handler(&kdev->input_handler);
+               kdev->registered = 0;
+       }
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* clear any existing configuration */
+       kfree(kdev->keychords);
+       kdev->keychords = 0;
+       kdev->keychord_count = 0;
+       kdev->key_down = 0;
+       memset(kdev->keybit, 0, sizeof(kdev->keybit));
+       memset(kdev->keystate, 0, sizeof(kdev->keystate));
+       kdev->head = kdev->tail = 0;
+
+       keychord = keychords;
+       end = (struct input_keychord *)((char *)keychord + count);
+
+       while (keychord < end) {
+               next = NEXT_KEYCHORD(keychord);
+               if (keychord->count <= 0 || next > end) {
+                       pr_err("keychord: invalid keycode count %d\n",
+                               keychord->count);
+                       goto err_unlock_return;
+               }
+               if (keychord->version != KEYCHORD_VERSION) {
+                       pr_err("keychord: unsupported version %d\n",
+                               keychord->version);
+                       goto err_unlock_return;
+               }
+
+               /* keep track of the keys we are monitoring in keybit */
+               for (i = 0; i < keychord->count; i++) {
+                       key = keychord->keycodes[i];
+                       if (key < 0 || key >= KEY_CNT) {
+                               pr_err("keychord: keycode %d out of range\n",
+                                       key);
+                               goto err_unlock_return;
+                       }
+                       __set_bit(key, kdev->keybit);
+               }
+
+               kdev->keychord_count++;
+               keychord = next;
+       }
+
+       kdev->keychords = keychords;
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       ret = input_register_handler(&kdev->input_handler);
+       if (ret) {
+               kfree(keychords);
+               kdev->keychords = 0;
+               return ret;
+       }
+       kdev->registered = 1;
+
+       return count;
+
+err_unlock_return:
+       spin_unlock_irqrestore(&kdev->lock, flags);
+       kfree(keychords);
+       return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+       struct keychord_device *kdev = file->private_data;
+
+       poll_wait(file, &kdev->waitq, wait);
+
+       if (kdev->head != kdev->tail)
+               return POLLIN | POLLRDNORM;
+
+       return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+       struct keychord_device *kdev;
+
+       kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+       if (!kdev)
+               return -ENOMEM;
+
+       spin_lock_init(&kdev->lock);
+       init_waitqueue_head(&kdev->waitq);
+
+       kdev->input_handler.event = keychord_event;
+       kdev->input_handler.connect = keychord_connect;
+       kdev->input_handler.disconnect = keychord_disconnect;
+       kdev->input_handler.name = KEYCHORD_NAME;
+       kdev->input_handler.id_table = kdev->device_ids;
+
+       kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+       __set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+       file->private_data = kdev;
+
+       return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+       struct keychord_device *kdev = file->private_data;
+
+       if (kdev->registered)
+               input_unregister_handler(&kdev->input_handler);
+       kfree(kdev);
+
+       return 0;
+}
+
+static const struct file_operations keychord_fops = {
+       .owner          = THIS_MODULE,
+       .open           = keychord_open,
+       .release        = keychord_release,
+       .read           = keychord_read,
+       .write          = keychord_write,
+       .poll           = keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+       .fops           = &keychord_fops,
+       .name           = KEYCHORD_NAME,
+       .minor          = MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+       return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+       misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
index 7913fdcfc8496bdd7c719a8c3334f6136c74eced..6035794bc1f208809aa0175685a3a15f58fc2c7d 100644 (file)
@@ -458,6 +458,18 @@ config DM_VERITY
 
          If unsure, say N.
 
+config DM_VERITY_FEC
+       bool "Verity forward error correction support"
+       depends on DM_VERITY
+       select REED_SOLOMON
+       select REED_SOLOMON_DEC8
+       ---help---
+         Add forward error correction support to dm-verity. This option
+         makes it possible to use pre-generated error correction data to
+         recover from corrupted blocks.
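+
+         The correction data is described to dm-verity via the optional
+         use_fec_from_device/fec_start/fec_blocks/fec_roots table
+         arguments.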
+
+         If unsure, say N.
+
 config DM_SWITCH
        tristate "Switch target support (EXPERIMENTAL)"
        depends on BLK_DEV_DM
@@ -488,4 +500,21 @@ config DM_LOG_WRITES
 
          If unsure, say N.
 
+config DM_ANDROID_VERITY
+       tristate "Android verity target support"
+       depends on DM_VERITY
+       depends on X509_CERTIFICATE_PARSER
+       depends on SYSTEM_TRUSTED_KEYRING
+       depends on PUBLIC_KEY_ALGO_RSA
+       depends on KEYS
+       depends on ASYMMETRIC_KEY_TYPE
+       depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+       depends on MD_LINEAR
+       ---help---
+         This device-mapper target is virtually a VERITY target. This
+         target is set up by reading the metadata contents piggybacked
+         onto the actual data blocks in the block device. The signature
+         of the metadata contents is verified against the key included
+         in the system keyring. Upon success, the underlying verity
+         target is set up.
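+
+         The verity mode and default key id are normally taken from the
+         androidboot.veritymode=, androidboot.verifiedbootstate= and
+         veritykeyid= kernel command line parameters handled in
+         drivers/md/dm-android-verity.c.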
 endif # MD
index f34979cd141aed02d867f11f3f03c9403d327005..32b5d0a90d603ee51121e257ef2e3ff1b9c85e13 100644 (file)
@@ -16,6 +16,7 @@ dm-cache-mq-y   += dm-cache-policy-mq.o
 dm-cache-smq-y   += dm-cache-policy-smq.o
 dm-cache-cleaner-y += dm-cache-policy-cleaner.o
 dm-era-y       += dm-era-target.o
+dm-verity-y    += dm-verity-target.o
 md-mod-y       += md.o bitmap.o
 raid456-y      += raid5.o raid5-cache.o
 
@@ -59,7 +60,12 @@ obj-$(CONFIG_DM_CACHE_SMQ)   += dm-cache-smq.o
 obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
 obj-$(CONFIG_DM_ERA)           += dm-era.o
 obj-$(CONFIG_DM_LOG_WRITES)    += dm-log-writes.o
+obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs                    += dm-uevent.o
 endif
+
+ifeq ($(CONFIG_DM_VERITY_FEC),y)
+dm-verity-objs                 += dm-verity-fec.o
+endif
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
new file mode 100644 (file)
index 0000000..bb6c128
--- /dev/null
@@ -0,0 +1,925 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <crypto/hash.h>
+#include <crypto/public_key.h>
+#include <crypto/sha.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "dm-verity.h"
+#include "dm-android-verity.h"
+
+static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH];
+static char buildvariant[BUILD_VARIANT];
+
+static bool target_added;
+static bool verity_enabled = true;
+struct dentry *debug_dir;
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+
+static struct target_type android_verity_target = {
+       .name                   = "android-verity",
+       .version                = {1, 0, 0},
+       .module                 = THIS_MODULE,
+       .ctr                    = android_verity_ctr,
+       .dtr                    = verity_dtr,
+       .map                    = verity_map,
+       .status                 = verity_status,
+       .prepare_ioctl          = verity_prepare_ioctl,
+       .iterate_devices        = verity_iterate_devices,
+       .io_hints               = verity_io_hints,
+};
+
+static int __init verified_boot_state_param(char *line)
+{
+       strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate));
+       return 1;
+}
+
+__setup("androidboot.verifiedbootstate=", verified_boot_state_param);
+
+static int __init verity_mode_param(char *line)
+{
+       strlcpy(veritymode, line, sizeof(veritymode));
+       return 1;
+}
+
+__setup("androidboot.veritymode=", verity_mode_param);
+
+static int __init verity_keyid_param(char *line)
+{
+       strlcpy(veritykeyid, line, sizeof(veritykeyid));
+       return 1;
+}
+
+__setup("veritykeyid=", verity_keyid_param);
+
+static int __init verity_buildvariant(char *line)
+{
+       strlcpy(buildvariant, line, sizeof(buildvariant));
+       return 1;
+}
+
+__setup("buildvariant=", verity_buildvariant);
+
+static inline bool default_verity_key_id(void)
+{
+       return veritykeyid[0] != '\0';
+}
+
+static inline bool is_eng(void)
+{
+       static const char typeeng[]  = "eng";
+
+       return !strncmp(buildvariant, typeeng, sizeof(typeeng));
+}
+
+static inline bool is_userdebug(void)
+{
+       static const char typeuserdebug[]  = "userdebug";
+
+       return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
+}
+
+
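+/*
+ * Wrap the raw RSA signature bytes from the metadata header in a single
+ * MPI so that verify_signature() can consume it.
+ */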
+static int table_extract_mpi_array(struct public_key_signature *pks,
+                               const void *data, size_t len)
+{
+       MPI mpi = mpi_read_raw_data(data, len);
+
+       if (!mpi) {
+               DMERR("Error while allocating mpi array");
+               return -ENOMEM;
+       }
+
+       pks->mpi[0] = mpi;
+       pks->nr_mpi = 1;
+       return 0;
+}
+
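+/*
+ * Hash the verity table with the requested algorithm. The returned
+ * public_key_signature, the shash descriptor and the digest share one
+ * allocation, so a single kfree() releases everything.
+ */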
+static struct public_key_signature *table_make_digest(
+                                               enum hash_algo hash,
+                                               const void *table,
+                                               unsigned long table_len)
+{
+       struct public_key_signature *pks = NULL;
+       struct crypto_shash *tfm;
+       struct shash_desc *desc;
+       size_t digest_size, desc_size;
+       int ret;
+
+       /* Allocate the hashing algorithm we're going to need and find out how
+        * big the hash operational data will be.
+        */
+       tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
+       if (IS_ERR(tfm))
+               return ERR_CAST(tfm);
+
+       desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+       digest_size = crypto_shash_digestsize(tfm);
+
+       /* We allocate the hash operational data storage on the end of out
+        * context data and the digest output buffer on the end of that.
+        */
+       ret = -ENOMEM;
+       pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
+       if (!pks)
+               goto error;
+
+       pks->pkey_hash_algo = hash;
+       pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
+       pks->digest_size = digest_size;
+
+       desc = (struct shash_desc *)(pks + 1);
+       desc->tfm = tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       ret = crypto_shash_init(desc);
+       if (ret < 0)
+               goto error;
+
+       ret = crypto_shash_finup(desc, table, table_len, pks->digest);
+       if (ret < 0)
+               goto error;
+
+       crypto_free_shash(tfm);
+       return pks;
+
+error:
+       kfree(pks);
+       crypto_free_shash(tfm);
+       return ERR_PTR(ret);
+}
+
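+/*
+ * Synchronously read 'length' bytes starting at sector 'offset' from the
+ * block device into freshly allocated pages tracked in payload->page_io.
+ * On success the caller owns (and must free) the pages and the array.
+ */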
+static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
+               sector_t offset, int length)
+{
+       struct bio *bio;
+       int err = 0, i;
+
+       payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+
+       bio = bio_alloc(GFP_KERNEL, payload->number_of_pages);
+       if (!bio) {
+               DMERR("Error while allocating bio");
+               return -ENOMEM;
+       }
+
+       bio->bi_bdev = bdev;
+       bio->bi_iter.bi_sector = offset;
+
+       payload->page_io = kzalloc(sizeof(struct page *) *
+               payload->number_of_pages, GFP_KERNEL);
+       if (!payload->page_io) {
+               DMERR("page_io array alloc failed");
+               err = -ENOMEM;
+               goto free_bio;
+       }
+
+       for (i = 0; i < payload->number_of_pages; i++) {
+               payload->page_io[i] = alloc_page(GFP_KERNEL);
+               if (!payload->page_io[i]) {
+                       DMERR("alloc_page failed");
+                       err = -ENOMEM;
+                       goto free_pages;
+               }
+               if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) {
+                       DMERR("bio_add_page error");
+                       err = -EIO;
+                       goto free_pages;
+               }
+       }
+
+       if (!submit_bio_wait(READ, bio))
+               /* success */
+               goto free_bio;
+       DMERR("bio read failed");
+       err = -EIO;
+
+free_pages:
+       for (i = 0; i < payload->number_of_pages; i++)
+               if (payload->page_io[i])
+                       __free_page(payload->page_io[i]);
+       kfree(payload->page_io);
+free_bio:
+       bio_put(bio);
+       return err;
+}
+
+static inline u64 fec_div_round_up(u64 x, u64 y)
+{
+       u64 remainder;
+
+       return div64_u64_rem(x, y, &remainder) +
+               (remainder > 0 ? 1 : 0);
+}
+
+static inline void populate_fec_metadata(struct fec_header *header,
+                               struct fec_ecc_metadata *ecc)
+{
+       ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size),
+                       FEC_BLOCK_SIZE);
+       ecc->roots = le32_to_cpu(header->roots);
+       ecc->start = le64_to_cpu(header->inp_size);
+}
+
+static inline int validate_fec_header(struct fec_header *header, u64 offset)
+{
+       /* move offset to make the sanity check work for backup header
+        * as well. */
+       offset -= offset % FEC_BLOCK_SIZE;
+       if (le32_to_cpu(header->magic) != FEC_MAGIC ||
+               le32_to_cpu(header->version) != FEC_VERSION ||
+               le32_to_cpu(header->size) != sizeof(struct fec_header) ||
+               le32_to_cpu(header->roots) == 0 ||
+               le32_to_cpu(header->roots) >= FEC_RSM)
+               return -EINVAL;
+
+       return 0;
+}
+
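+/*
+ * Read the last FEC_BLOCK_SIZE bytes of the device and validate the fec
+ * header found there (falling back to the backup copy at the end of the
+ * block). On success the ecc metadata is populated; otherwise ecc->valid
+ * is set to false.
+ */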
+static int extract_fec_header(dev_t dev, struct fec_header *fec,
+                               struct fec_ecc_metadata *ecc)
+{
+       u64 device_size;
+       struct bio_read payload;
+       int i, err = 0;
+       struct block_device *bdev;
+
+       bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+       if (IS_ERR_OR_NULL(bdev)) {
+               DMERR("bdev get error");
+               return PTR_ERR(bdev);
+       }
+
+       device_size = i_size_read(bdev->bd_inode);
+
+       /* fec metadata size is a power of 2 and PAGE_SIZE
+        * is a power of 2 as well.
+        */
+       BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE);
+       /* 512 byte sector alignment */
+       BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0);
+
+       err = read_block_dev(&payload, bdev, (device_size -
+               FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE);
+       if (err) {
+               DMERR("Error while reading verity metadata");
+               goto error;
+       }
+
+       BUG_ON(sizeof(struct fec_header) > PAGE_SIZE);
+       memcpy(fec, page_address(payload.page_io[0]),
+                       sizeof(*fec));
+
+       ecc->valid = true;
+       if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) {
+               /* Try the backup header */
+               memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE
+                       - sizeof(*fec) ,
+                       sizeof(*fec));
+               if (validate_fec_header(fec, device_size -
+                       sizeof(struct fec_header)))
+                       ecc->valid = false;
+       }
+
+       if (ecc->valid)
+               populate_fec_metadata(fec, ecc);
+
+       for (i = 0; i < payload.number_of_pages; i++)
+               __free_page(payload.page_io[i]);
+       kfree(payload.page_io);
+
+error:
+       blkdev_put(bdev, FMODE_READ);
+       return err;
+}
+static void find_metadata_offset(struct fec_header *fec,
+               struct block_device *bdev, u64 *metadata_offset)
+{
+       u64 device_size;
+
+       device_size = i_size_read(bdev->bd_inode);
+
+       if (le32_to_cpu(fec->magic) == FEC_MAGIC)
+               *metadata_offset = le64_to_cpu(fec->inp_size) -
+                                       VERITY_METADATA_SIZE;
+       else
+               *metadata_offset = device_size - VERITY_METADATA_SIZE;
+}
+
+static int find_size(dev_t dev, u64 *device_size)
+{
+       struct block_device *bdev;
+
+       bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+       if (IS_ERR_OR_NULL(bdev)) {
+               DMERR("blkdev_get_by_dev failed");
+               return PTR_ERR(bdev);
+       }
+
+       *device_size = i_size_read(bdev->bd_inode);
+       *device_size >>= SECTOR_SHIFT;
+
+       DMINFO("blkdev size in sectors: %llu", *device_size);
+       blkdev_put(bdev, FMODE_READ);
+       return 0;
+}
+
+static int verify_header(struct android_metadata_header *header)
+{
+       int retval = -EINVAL;
+
+       if (is_userdebug() && le32_to_cpu(header->magic_number) ==
+                       VERITY_METADATA_MAGIC_DISABLE)
+               return VERITY_STATE_DISABLE;
+
+       if (!(le32_to_cpu(header->magic_number) ==
+                       VERITY_METADATA_MAGIC_NUMBER) ||
+                       (le32_to_cpu(header->magic_number) ==
+                       VERITY_METADATA_MAGIC_DISABLE)) {
+               DMERR("Incorrect magic number");
+               return retval;
+       }
+
+       if (le32_to_cpu(header->protocol_version) !=
+                       VERITY_METADATA_VERSION) {
+               DMERR("Unsupported version %u",
+                       le32_to_cpu(header->protocol_version));
+               return retval;
+       }
+
+       return 0;
+}
+
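+/*
+ * Locate the android verity metadata (using the fec header if present,
+ * otherwise the end of the device), read and validate its header, and
+ * copy the NUL-terminated verity table into a freshly allocated buffer.
+ */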
+static int extract_metadata(dev_t dev, struct fec_header *fec,
+                               struct android_metadata **metadata,
+                               bool *verity_enabled)
+{
+       struct block_device *bdev;
+       struct android_metadata_header *header;
+       int i;
+       u32 table_length, copy_length, offset;
+       u64 metadata_offset;
+       struct bio_read payload;
+       int err = 0;
+
+       bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+       if (IS_ERR_OR_NULL(bdev)) {
+               DMERR("blkdev_get_by_dev failed");
+               return -ENODEV;
+       }
+
+       find_metadata_offset(fec, bdev, &metadata_offset);
+
+       /* Verity metadata size is a power of 2 and PAGE_SIZE
+        * is a power of 2 as well.
+        * PAGE_SIZE is also a multiple of 512 bytes.
+        */
+       if (VERITY_METADATA_SIZE > PAGE_SIZE)
+               BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0);
+       /* 512 byte sector alignment */
+       BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0);
+
+       err = read_block_dev(&payload, bdev, metadata_offset /
+               (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE);
+       if (err) {
+               DMERR("Error while reading verity metadata");
+               goto blkdev_release;
+       }
+
+       header = kzalloc(sizeof(*header), GFP_KERNEL);
+       if (!header) {
+               DMERR("kzalloc failed for header");
+               err = -ENOMEM;
+               goto free_payload;
+       }
+
+       memcpy(header, page_address(payload.page_io[0]),
+               sizeof(*header));
+
+       DMINFO("bio magic_number:%u protocol_version:%d table_length:%u",
+               le32_to_cpu(header->magic_number),
+               le32_to_cpu(header->protocol_version),
+               le32_to_cpu(header->table_length));
+
+       err = verify_header(header);
+
+       if (err == VERITY_STATE_DISABLE) {
+               DMERR("Mounting root with verity disabled");
+               *verity_enabled = false;
+               /* We still have to read the metadata to figure out
+                * the data block count and size. Alternatively, we could
+                * map the entire partition similar to mounting the device.
+                *
+                * Reset the error since the verity_enabled flag changed.
+                */
+               err = 0;
+       } else if (err)
+               goto free_header;
+
+       *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL);
+       if (!*metadata) {
+               DMERR("kzalloc for metadata failed");
+               err = -ENOMEM;
+               goto free_header;
+       }
+
+       (*metadata)->header = header;
+       table_length = le32_to_cpu(header->table_length);
+
+       if (table_length == 0 ||
+               table_length > (VERITY_METADATA_SIZE -
+                       sizeof(struct android_metadata_header))) {
+               DMERR("table_length too long");
+               err = -EINVAL;
+               goto free_metadata;
+       }
+
+       (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL);
+
+       if (!(*metadata)->verity_table) {
+               DMERR("kzalloc verity_table failed");
+               err = -ENOMEM;
+               goto free_metadata;
+       }
+
+       if (sizeof(struct android_metadata_header) +
+                       table_length <= PAGE_SIZE) {
+               memcpy((*metadata)->verity_table,
+                       page_address(payload.page_io[0])
+                       + sizeof(struct android_metadata_header),
+                       table_length);
+       } else {
+               copy_length = PAGE_SIZE -
+                       sizeof(struct android_metadata_header);
+               memcpy((*metadata)->verity_table,
+                       page_address(payload.page_io[0])
+                       + sizeof(struct android_metadata_header),
+                       copy_length);
+               table_length -= copy_length;
+               offset = copy_length;
+               i = 1;
+               while (table_length != 0) {
+                       if (table_length > PAGE_SIZE) {
+                               memcpy((*metadata)->verity_table + offset,
+                                       page_address(payload.page_io[i]),
+                                       PAGE_SIZE);
+                               offset += PAGE_SIZE;
+                               table_length -= PAGE_SIZE;
+                       } else {
+                               memcpy((*metadata)->verity_table + offset,
+                                       page_address(payload.page_io[i]),
+                                       table_length);
+                               table_length = 0;
+                       }
+                       i++;
+               }
+       }
+       (*metadata)->verity_table[table_length] = '\0';
+
+       DMINFO("verity_table: %s", (*metadata)->verity_table);
+       goto free_payload;
+
+free_metadata:
+       kfree(*metadata);
+free_header:
+       kfree(header);
+free_payload:
+       for (i = 0; i < payload.number_of_pages; i++)
+               if (payload.page_io[i])
+                       __free_page(payload.page_io[i]);
+       kfree(payload.page_io);
+blkdev_release:
+       blkdev_put(bdev, FMODE_READ);
+       return err;
+}
+
+/* helper functions to extract properties from dts */
+const char *find_dt_value(const char *name)
+{
+       struct device_node *firmware;
+       const char *value;
+
+       firmware = of_find_node_by_path("/firmware/android");
+       if (!firmware)
+               return NULL;
+       value = of_get_property(firmware, name, NULL);
+       of_node_put(firmware);
+
+       return value;
+}
+
+static int verity_mode(void)
+{
+       static const char enforcing[] = "enforcing";
+       static const char verified_mode_prop[] = "veritymode";
+       const char *value;
+
+       value = find_dt_value(verified_mode_prop);
+       if (!value)
+               value = veritymode;
+       if (!strncmp(value, enforcing, sizeof(enforcing) - 1))
+               return DM_VERITY_MODE_RESTART;
+
+       return DM_VERITY_MODE_EIO;
+}
+
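+/*
+ * Look up the public key identified by key_id in the system trusted
+ * keyring, hash the verity table with SHA-256 and verify the RSA
+ * signature carried in the metadata header against that digest.
+ */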
+static int verify_verity_signature(char *key_id,
+               struct android_metadata *metadata)
+{
+       key_ref_t key_ref;
+       struct key *key;
+       struct public_key_signature *pks = NULL;
+       int retval = -EINVAL;
+
+       key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
+               &key_type_asymmetric, key_id);
+
+       if (IS_ERR(key_ref)) {
+               DMERR("keyring: key not found");
+               return -ENOKEY;
+       }
+
+       key = key_ref_to_ptr(key_ref);
+
+       pks = table_make_digest(HASH_ALGO_SHA256,
+                       (const void *)metadata->verity_table,
+                       le32_to_cpu(metadata->header->table_length));
+
+       if (IS_ERR(pks)) {
+               DMERR("hashing failed");
+               goto error;
+       }
+
+       retval = table_extract_mpi_array(pks, &metadata->header->signature[0],
+                               RSANUMBYTES);
+       if (retval < 0) {
+               DMERR("Error extracting mpi %d", retval);
+               goto error;
+       }
+
+       retval = verify_signature(key, pks);
+       mpi_free(pks->rsa.s);
+error:
+       kfree(pks);
+       key_put(key);
+
+       return retval;
+}
+
+static void handle_error(void)
+{
+       int mode = verity_mode();
+       if (mode == DM_VERITY_MODE_RESTART) {
+               DMERR("triggering restart");
+               kernel_restart("dm-verity device corrupted");
+       } else {
+               DMERR("Mounting verity root failed");
+       }
+}
+
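+/* Return true if a * b would overflow sector_t. */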
+static inline bool test_mult_overflow(sector_t a, u32 b)
+{
+       sector_t r = (sector_t)~0ULL;
+
+       sector_div(r, b);
+       return a > r;
+}
+
+static int add_as_linear_device(struct dm_target *ti, char *dev)
+{
+       /* Move to linear mapping defines */
+       char *linear_table_args[DM_LINEAR_ARGS] = {dev,
+                                       DM_LINEAR_TARGET_OFFSET};
+       int err = 0;
+
+       android_verity_target.dtr = dm_linear_dtr,
+       android_verity_target.map = dm_linear_map,
+       android_verity_target.status = dm_linear_status,
+       android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl,
+       android_verity_target.iterate_devices = dm_linear_iterate_devices,
+       android_verity_target.io_hints = NULL;
+
+       err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
+
+       if (!err) {
+               DMINFO("Added android-verity as a linear target");
+               target_added = true;
+       } else
+               DMERR("Failed to add android-verity as linear target");
+
+       return err;
+}
+
+/*
+ * Target parameters:
+ *     <key id>        Key id of the public key in the system keyring.
+ *                     Verity metadata's signature would be verified against
+ *                     this. If the key id contains spaces, replace them
+ *                     with '#'.
+ *     <block device>  The block device for which dm-verity is being set up.
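+ *
+ * An illustrative dm table line (all fields are placeholders) would be:
+ *
+ *     0 <num_data_sectors> android-verity <key_id> <block_device>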
+ */
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+       dev_t uninitialized_var(dev);
+       struct android_metadata *metadata = NULL;
+       int err = 0, i, mode;
+       char *key_id, *table_ptr, dummy, *target_device,
+       *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
+       /* One for specifying number of opt args and one for mode */
+       sector_t data_sectors;
+       u32 data_block_size;
+       unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS;
+       struct fec_header uninitialized_var(fec);
+       struct fec_ecc_metadata uninitialized_var(ecc);
+       char buf[FEC_ARG_LENGTH], *buf_ptr;
+       unsigned long long tmpll;
+       u64  uninitialized_var(device_size);
+
+       if (argc == 1) {
+               /* Use the default keyid */
+               if (default_verity_key_id())
+                       key_id = veritykeyid;
+               else if (!is_eng()) {
+                       DMERR("veritykeyid= is not set");
+                       handle_error();
+                       return -EINVAL;
+               }
+       } else if (argc == 2)
+               key_id = argv[1];
+       else {
+               DMERR("Incorrect number of arguments");
+               handle_error();
+               return -EINVAL;
+       }
+
+       target_device = argv[0];
+
+       dev = name_to_dev_t(target_device);
+       if (!dev) {
+               DMERR("no dev found for %s", target_device);
+               handle_error();
+               return -EINVAL;
+       }
+
+       if (is_eng()) {
+               err = find_size(dev, &device_size);
+               if (err) {
+                       DMERR("error finding bdev size");
+                       handle_error();
+                       return err;
+               }
+
+               ti->len = device_size;
+               err = add_as_linear_device(ti, target_device);
+               if (err) {
+                       handle_error();
+                       return err;
+               }
+               verity_enabled = false;
+               return 0;
+       }
+
+       strreplace(key_id, '#', ' ');
+
+       DMINFO("key:%s dev:%s", key_id, target_device);
+
+       if (extract_fec_header(dev, &fec, &ecc)) {
+               DMERR("Error while extracting fec header");
+               handle_error();
+               return -EINVAL;
+       }
+
+       err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
+
+       if (err) {
+               DMERR("Error while extracting metadata");
+               handle_error();
+               goto free_metadata;
+       }
+
+       if (verity_enabled) {
+               err = verify_verity_signature(key_id, metadata);
+
+               if (err) {
+                       DMERR("Signature verification failed");
+                       handle_error();
+                       goto free_metadata;
+               } else
+                       DMINFO("Signature verification success");
+       }
+
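+       /*
+        * The table stored in the metadata is a regular dm-verity table;
+        * split out its first VERITY_TABLE_ARGS fields, then substitute the
+        * real data/hash device and append the optional arguments below.
+        */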
+       table_ptr = metadata->verity_table;
+
+       for (i = 0; i < VERITY_TABLE_ARGS; i++) {
+               verity_table_args[i] = strsep(&table_ptr, " ");
+               if (verity_table_args[i] == NULL)
+                       break;
+       }
+
+       if (i != VERITY_TABLE_ARGS) {
+               DMERR("Verity table not in the expected format");
+               err = -EINVAL;
+               handle_error();
+               goto free_metadata;
+       }
+
+       if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy)
+                                                       != 1) {
+               DMERR("Verity table not in the expected format");
+               handle_error();
+               err = -EINVAL;
+               goto free_metadata;
+       }
+
+       if (tmpll > ULONG_MAX) {
+               DMERR("<num_data_blocks> too large. Forgot to turn on CONFIG_LBDAF?");
+               handle_error();
+               err = -EINVAL;
+               goto free_metadata;
+       }
+
+       data_sectors = tmpll;
+
+       if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy)
+                                                               != 1) {
+               DMERR("Verity table not in the expected format");
+               handle_error();
+               err = -EINVAL;
+               goto free_metadata;
+       }
+
+       if (test_mult_overflow(data_sectors, data_block_size >>
+                                                       SECTOR_SHIFT)) {
+               DMERR("data_sectors too large");
+               handle_error();
+               err = -EOVERFLOW;
+               goto free_metadata;
+       }
+
+       data_sectors *= data_block_size >> SECTOR_SHIFT;
+       DMINFO("Data sectors %llu", (unsigned long long)data_sectors);
+
+       /* update target length */
+       ti->len = data_sectors;
+
+       /* Setup linear target and free */
+       if (!verity_enabled) {
+               err = add_as_linear_device(ti, target_device);
+               goto free_metadata;
+       }
+
+       /* Substitute data_dev and hash_dev */
+       verity_table_args[1] = target_device;
+       verity_table_args[2] = target_device;
+
+       mode = verity_mode();
+
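+       /*
+        * Build the optional-argument string for verity_ctr(): the error
+        * handling mode (restart/logging), ignore_zero_blocks and, when
+        * valid FEC metadata was found and DM_VERITY_FEC is built in, the
+        * use_fec_from_device arguments.
+        */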
+       if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) {
+               if (mode) {
+                       err = snprintf(buf, FEC_ARG_LENGTH,
+                               "%u %s " VERITY_TABLE_OPT_FEC_FORMAT,
+                               1 + VERITY_TABLE_OPT_FEC_ARGS,
+                               mode == DM_VERITY_MODE_RESTART ?
+                                       VERITY_TABLE_OPT_RESTART :
+                                       VERITY_TABLE_OPT_LOGGING,
+                               target_device,
+                               ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+                               ecc.roots);
+               } else {
+                       err = snprintf(buf, FEC_ARG_LENGTH,
+                               "%u " VERITY_TABLE_OPT_FEC_FORMAT,
+                               VERITY_TABLE_OPT_FEC_ARGS, target_device,
+                               ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+                               ecc.roots);
+               }
+       } else if (mode) {
+               err = snprintf(buf, FEC_ARG_LENGTH,
+                       "2 " VERITY_TABLE_OPT_IGNZERO " %s",
+                       mode == DM_VERITY_MODE_RESTART ?
+                       VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING);
+       } else {
+               err = snprintf(buf, FEC_ARG_LENGTH, "1 %s",
+                                "ignore_zero_blocks");
+       }
+
+       if (err < 0 || err >= FEC_ARG_LENGTH)
+               goto free_metadata;
+
+       buf_ptr = buf;
+
+       for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS +
+               VERITY_TABLE_OPT_FEC_ARGS + 2); i++) {
+               verity_table_args[i] = strsep(&buf_ptr, " ");
+               if (verity_table_args[i] == NULL) {
+                       no_of_args = i;
+                       break;
+               }
+       }
+
+       err = verity_ctr(ti, no_of_args, verity_table_args);
+
+       if (err)
+               DMERR("android-verity failed to mount as verity target");
+       else {
+               target_added = true;
+               DMINFO("android-verity mounted as verity target");
+       }
+
+free_metadata:
+       if (metadata) {
+               kfree(metadata->header);
+               kfree(metadata->verity_table);
+       }
+       kfree(metadata);
+       return err;
+}
+
+static int __init dm_android_verity_init(void)
+{
+       int r;
+       struct dentry *file;
+
+       r = dm_register_target(&android_verity_target);
+       if (r < 0)
+               DMERR("register failed %d", r);
+
+       /* Tracks the status of the last added target */
+       debug_dir = debugfs_create_dir("android_verity", NULL);
+
+       if (IS_ERR_OR_NULL(debug_dir)) {
+               DMERR("Cannot create android_verity debugfs directory: %ld",
+                       PTR_ERR(debug_dir));
+               goto end;
+       }
+
+       file = debugfs_create_bool("target_added", S_IRUGO, debug_dir,
+                               &target_added);
+
+       if (IS_ERR_OR_NULL(file)) {
+               DMERR("Cannot create android_verity debugfs directory: %ld",
+                       PTR_ERR(debug_dir));
+               debugfs_remove_recursive(debug_dir);
+               goto end;
+       }
+
+       file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir,
+                               &verity_enabled);
+
+       if (IS_ERR_OR_NULL(file)) {
+               DMERR("Cannot create android_verity debugfs directory: %ld",
+                       PTR_ERR(debug_dir));
+               debugfs_remove_recursive(debug_dir);
+       }
+
+end:
+       return r;
+}
+
+static void __exit dm_android_verity_exit(void)
+{
+       if (!IS_ERR_OR_NULL(debug_dir))
+               debugfs_remove_recursive(debug_dir);
+
+       dm_unregister_target(&android_verity_target);
+}
+
+module_init(dm_android_verity_init);
+module_exit(dm_android_verity_exit);
diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h
new file mode 100644 (file)
index 0000000..0c7ff6a
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DM_ANDROID_VERITY_H
+#define DM_ANDROID_VERITY_H
+
+#include <crypto/sha.h>
+
+#define RSANUMBYTES 256
+#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001
+#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56
+#define VERITY_METADATA_VERSION 0
+#define VERITY_STATE_DISABLE 1
+#define DATA_BLOCK_SIZE (4 * 1024)
+#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE)
+#define VERITY_TABLE_ARGS 10
+#define VERITY_COMMANDLINE_PARAM_LENGTH 20
+#define BUILD_VARIANT 20
+
+/*
+ * <subject>:<sha1-id> is the format for the identifier.
+ * subject can either be the Common Name (CN) + Organization Name (O) or
+ * just the CN if it is prefixed with O.
+ * From https://tools.ietf.org/html/rfc5280#appendix-A
+ * ub-organization-name-length INTEGER ::= 64
+ * ub-common-name-length INTEGER ::= 64
+ *
+ * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278
+ * ctx->o_size + 2 + ctx->cn_size + 1
+ * + 41 characters for ":" and sha1 id
+ * 64 + 2 + 64 + 1 + 1 + 40 (172)
+ * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters.
+ */
+#define VERITY_DEFAULT_KEY_ID_LENGTH 200
+
+#define FEC_MAGIC 0xFECFECFE
+#define FEC_BLOCK_SIZE (4 * 1024)
+#define FEC_VERSION 0
+#define FEC_RSM 255
+#define FEC_ARG_LENGTH 300
+
+#define VERITY_TABLE_OPT_RESTART "restart_on_corruption"
+#define VERITY_TABLE_OPT_LOGGING "ignore_corruption"
+#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks"
+
+#define VERITY_TABLE_OPT_FEC_FORMAT \
+       "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks"
+#define VERITY_TABLE_OPT_FEC_ARGS 9
+
+#define VERITY_DEBUG 0
+
+#define DM_MSG_PREFIX                   "android-verity"
+
+#define DM_LINEAR_ARGS 2
+#define DM_LINEAR_TARGET_OFFSET "0"
+
+/*
+ * There can be two formats.
+ * if fec is present
+ * <data_blocks> <verity_tree> <verity_metadata_32K><fec_data><fec_data_4K>
+ * if fec is not present
+ * <data_blocks> <verity_tree> <verity_metadata_32K>
+ */
+struct fec_header {
+       __le32 magic;
+       __le32 version;
+       __le32 size;
+       __le32 roots;
+       __le32 fec_size;
+       __le64 inp_size;
+       u8 hash[SHA256_DIGEST_SIZE];
+} __attribute__((packed));
+
+struct android_metadata_header {
+       __le32 magic_number;
+       __le32 protocol_version;
+       char signature[RSANUMBYTES];
+       __le32 table_length;
+};
+
+struct android_metadata {
+       struct android_metadata_header *header;
+       char *verity_table;
+};
+
+struct fec_ecc_metadata {
+       bool valid;
+       u32 roots;
+       u64 blocks;
+       u64 rounds;
+       u64 start;
+};
+
+struct bio_read {
+       struct page **page_io;
+       int number_of_pages;
+};
+
+extern struct target_type linear_target;
+
+extern void dm_linear_dtr(struct dm_target *ti);
+extern int dm_linear_map(struct dm_target *ti, struct bio *bio);
+extern void dm_linear_status(struct dm_target *ti, status_type_t type,
+                       unsigned status_flags, char *result, unsigned maxlen);
+extern int dm_linear_prepare_ioctl(struct dm_target *ti,
+                struct block_device **bdev, fmode_t *mode);
+extern int dm_linear_iterate_devices(struct dm_target *ti,
+                       iterate_devices_callout_fn fn, void *data);
+extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv);
+#endif /* DM_ANDROID_VERITY_H */
index 515f83e7d9abfe868a9f5b1613eb6d9e6ba5b1e3..bb9b92ebbf8e5b850e8c3d0243053e9fdb7e1ad2 100644 (file)
@@ -118,14 +118,12 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct dm_hook_info {
        bio_end_io_t *bi_end_io;
-       void *bi_private;
 };
 
 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
                        bio_end_io_t *bi_end_io, void *bi_private)
 {
        h->bi_end_io = bio->bi_end_io;
-       h->bi_private = bio->bi_private;
 
        bio->bi_end_io = bi_end_io;
        bio->bi_private = bi_private;
@@ -134,7 +132,6 @@ static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 {
        bio->bi_end_io = h->bi_end_io;
-       bio->bi_private = h->bi_private;
 }
 
 /*----------------------------------------------------------------*/
index 5cac11d7a87630b7b9a7861a2282e5eef1d857cd..e540b7942ebac2b387dd609ed0c7de1f22515040 100644 (file)
@@ -1860,16 +1860,24 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        ret = -ENOMEM;
-       cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
+       cc->io_queue = alloc_workqueue("kcryptd_io",
+                                      WQ_HIGHPRI |
+                                      WQ_MEM_RECLAIM,
+                                      1);
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad;
        }
 
        if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+               cc->crypt_queue = alloc_workqueue("kcryptd",
+                                                 WQ_HIGHPRI |
+                                                 WQ_MEM_RECLAIM, 1);
        else
-               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+               cc->crypt_queue = alloc_workqueue("kcryptd",
+                                                 WQ_HIGHPRI |
+                                                 WQ_MEM_RECLAIM |
+                                                 WQ_UNBOUND,
                                                  num_online_cpus());
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
index 80a43954325966a872cf8ebc95fb23c92c0b8819..bc5e9a5b1f302b45c96fb6e874f04022ed9031b2 100644 (file)
@@ -1923,6 +1923,45 @@ void dm_interface_exit(void)
        dm_hash_exit();
 }
 
+
+/**
+ * dm_ioctl_export - Permanently export a mapped device via the ioctl interface
+ * @md: Pointer to mapped_device
+ * @name: Buffer (size DM_NAME_LEN) for name
+ * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+                   const char *uuid)
+{
+       int r = 0;
+       struct hash_cell *hc;
+
+       if (!md) {
+               r = -ENXIO;
+               goto out;
+       }
+
+       /* The name and uuid can only be set once. */
+       mutex_lock(&dm_hash_cells_mutex);
+       hc = dm_get_mdptr(md);
+       mutex_unlock(&dm_hash_cells_mutex);
+       if (hc) {
+               DMERR("%s: already exported", dm_device_name(md));
+               r = -ENXIO;
+               goto out;
+       }
+
+       r = dm_hash_insert(name, uuid, md);
+       if (r) {
+               DMERR("%s: could not bind to '%s'", dm_device_name(md), name);
+               goto out;
+       }
+
+       /* Let udev know we've changed. */
+       dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md));
+out:
+       return r;
+}
 /**
  * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
  * @md: Pointer to mapped_device
index 05c35aacb3aaeadb4cfddd73a14fed73a7c1bbe0..2ff5f32a4b99c1762d9c293902af89a56ac6bc5a 100644 (file)
@@ -25,7 +25,7 @@ struct linear_c {
 /*
  * Construct a linear mapping: <dev_path> <offset>
  */
-static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
        struct linear_c *lc;
        unsigned long long tmp;
@@ -66,14 +66,16 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        kfree(lc);
        return ret;
 }
+EXPORT_SYMBOL_GPL(dm_linear_ctr);
 
-static void linear_dtr(struct dm_target *ti)
+void dm_linear_dtr(struct dm_target *ti)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
 
        dm_put_device(ti, lc->dev);
        kfree(lc);
 }
+EXPORT_SYMBOL_GPL(dm_linear_dtr);
 
 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
@@ -92,14 +94,15 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
                        linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
-static int linear_map(struct dm_target *ti, struct bio *bio)
+int dm_linear_map(struct dm_target *ti, struct bio *bio)
 {
        linear_map_bio(ti, bio);
 
        return DM_MAPIO_REMAPPED;
 }
+EXPORT_SYMBOL_GPL(dm_linear_map);
 
-static void linear_status(struct dm_target *ti, status_type_t type,
+void dm_linear_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
@@ -115,8 +118,9 @@ static void linear_status(struct dm_target *ti, status_type_t type,
                break;
        }
 }
+EXPORT_SYMBOL_GPL(dm_linear_status);
 
-static int linear_prepare_ioctl(struct dm_target *ti,
+int dm_linear_prepare_ioctl(struct dm_target *ti,
                struct block_device **bdev, fmode_t *mode)
 {
        struct linear_c *lc = (struct linear_c *) ti->private;
@@ -132,25 +136,27 @@ static int linear_prepare_ioctl(struct dm_target *ti,
                return 1;
        return 0;
 }
+EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
 
-static int linear_iterate_devices(struct dm_target *ti,
+int dm_linear_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
        struct linear_c *lc = ti->private;
 
        return fn(ti, lc->dev, lc->start, ti->len, data);
 }
+EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
 
 static struct target_type linear_target = {
        .name   = "linear",
        .version = {1, 2, 1},
        .module = THIS_MODULE,
-       .ctr    = linear_ctr,
-       .dtr    = linear_dtr,
-       .map    = linear_map,
-       .status = linear_status,
-       .prepare_ioctl = linear_prepare_ioctl,
-       .iterate_devices = linear_iterate_devices,
+       .ctr    = dm_linear_ctr,
+       .dtr    = dm_linear_dtr,
+       .map    = dm_linear_map,
+       .status = dm_linear_status,
+       .prepare_ioctl  = dm_linear_prepare_ioctl,
+       .iterate_devices = dm_linear_iterate_devices,
 };
 
 int __init dm_linear_init(void)
index e108deebbaaa9e9c8578106eac09d83fb0b2892a..e4d1bafe78c1ab63e411eedd26c3c81d17713e66 100644 (file)
@@ -207,7 +207,6 @@ struct dm_snap_pending_exception {
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
-       void *full_bio_private;
 };
 
 /*
@@ -1495,10 +1494,8 @@ out:
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
-       if (full_bio) {
+       if (full_bio)
                full_bio->bi_end_io = pe->full_bio_end_io;
-               full_bio->bi_private = pe->full_bio_private;
-       }
        increment_pending_exceptions_done_count();
 
        up_write(&s->lock);
@@ -1604,7 +1601,6 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
 
        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
-       pe->full_bio_private = bio->bi_private;
 
        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);
index cb5d0daf53bb65ba0c47c330ce533590840adb46..b3d78bba3a79a450f8dc92b9b3bb37e2d989e107 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/slab.h>
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
new file mode 100644 (file)
index 0000000..1dd667b
--- /dev/null
@@ -0,0 +1,870 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include "dm-verity-fec.h"
+#include <linux/math64.h>
+#include <linux/sysfs.h>
+
+#define DM_MSG_PREFIX  "verity-fec"
+
+/*
+ * If error correction has been configured, returns true.
+ */
+bool verity_fec_is_enabled(struct dm_verity *v)
+{
+       return v->fec && v->fec->dev;
+}
+
+/*
+ * Return a pointer to dm_verity_fec_io after dm_verity_io and its variable
+ * length fields.
+ */
+static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
+{
+       return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+}
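
The helper above relies on the per-bio FEC state living in the same per_bio_data allocation as struct dm_verity_io, directly behind the verity io header and its variable-length fields (verity_fec_ctr() later grows ti->per_bio_data_size by sizeof(struct dm_verity_fec_io) to make room). A stand-alone sketch of that layout assumption; the struct names, field sizes and the split into a descriptor plus two digests are invented for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* hypothetical stand-ins for dm_verity_io / dm_verity_fec_io */
struct io_hdr    { size_t desc_size, digest_size; /* desc + 2 digests follow */ };
struct fec_state { unsigned level; };

/* mirrors fec_io(): skip the header plus its variable-length tail */
static struct fec_state *fec_state_of(struct io_hdr *io)
{
	return (struct fec_state *)
		((uint8_t *)(io + 1) + io->desc_size + 2 * io->digest_size);
}

int main(void)
{
	union { struct io_hdr hdr; uint8_t bytes[512]; } blob = { { 0 } };

	blob.hdr.desc_size = 104;	/* made-up shash descriptor size */
	blob.hdr.digest_size = 32;	/* e.g. a 32-byte digest */
	printf("fec state starts at byte %td of the per-bio area\n",
	       (uint8_t *)fec_state_of(&blob.hdr) - blob.bytes);
	return 0;
}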
+
+/*
+ * Return an interleaved offset for a byte in RS block.
+ */
+static inline u64 fec_interleave(struct dm_verity *v, u64 offset)
+{
+       u32 mod;
+
+       mod = do_div(offset, v->fec->rsn);
+       return offset + mod * (v->fec->rounds << v->data_dev_block_bits);
+}
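
A stand-alone sketch of the interleaving arithmetic above. do_div() stores the quotient back into its first argument and returns the remainder, so the mapping is quotient + remainder * rounds * block_size; the rsn, rounds and block-size values below are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t rsn = 4, rounds = 3, block_bits = 3;	/* toy parameters */

	for (uint64_t off = 0; off < 12; off++) {
		uint64_t q = off / rsn, mod = off % rsn;	/* what do_div() yields */
		uint64_t ileaved = q + mod * (rounds << block_bits);

		printf("RS stream byte %2llu -> interleaved offset %2llu\n",
		       (unsigned long long)off, (unsigned long long)ileaved);
	}
	return 0;
}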
+
+/*
+ * Decode an RS block using Reed-Solomon.
+ */
+static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+                         u8 *data, u8 *fec, int neras)
+{
+       int i;
+       uint16_t par[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN];
+
+       for (i = 0; i < v->fec->roots; i++)
+               par[i] = fec[i];
+
+       return decode_rs8(fio->rs, data, par, v->fec->rsn, NULL, neras,
+                         fio->erasures, 0, NULL);
+}
+
+/*
+ * Read error-correcting codes for the requested RS block. Returns a pointer
+ * to the data block. Caller is responsible for releasing buf.
+ */
+static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+                          unsigned *offset, struct dm_buffer **buf)
+{
+       u64 position, block;
+       u8 *res;
+
+       position = (index + rsb) * v->fec->roots;
+       block = position >> v->data_dev_block_bits;
+       *offset = (unsigned)(position - (block << v->data_dev_block_bits));
+
+       res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+       if (unlikely(IS_ERR(res))) {
+               DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
+                     v->data_dev->name, (unsigned long long)rsb,
+                     (unsigned long long)(v->fec->start + block),
+                     PTR_ERR(res));
+               *buf = NULL;
+       }
+
+       return res;
+}
+
+/* Loop over each preallocated buffer slot. */
+#define fec_for_each_prealloc_buffer(__i) \
+       for (__i = 0; __i < DM_VERITY_FEC_BUF_PREALLOC; __i++)
+
+/* Loop over each extra buffer slot. */
+#define fec_for_each_extra_buffer(io, __i) \
+       for (__i = DM_VERITY_FEC_BUF_PREALLOC; __i < DM_VERITY_FEC_BUF_MAX; __i++)
+
+/* Loop over each allocated buffer. */
+#define fec_for_each_buffer(io, __i) \
+       for (__i = 0; __i < (io)->nbufs; __i++)
+
+/* Loop over each RS block in each allocated buffer. */
+#define fec_for_each_buffer_rs_block(io, __i, __j) \
+       fec_for_each_buffer(io, __i) \
+               for (__j = 0; __j < 1 << DM_VERITY_FEC_BUF_RS_BITS; __j++)
+
+/*
+ * Return a pointer to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+                                     struct dm_verity_fec_io *fio,
+                                     unsigned i, unsigned j)
+{
+       return &fio->bufs[i][j * v->fec->rsn];
+}
+
+/*
+ * Return an index to the current RS block when called inside
+ * fec_for_each_buffer_rs_block.
+ */
+static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+{
+       return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
+}
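
Each buffer holds 1 << DM_VERITY_FEC_BUF_RS_BITS RS blocks of rsn bytes, so the two helpers above flatten a (buffer, slot) pair into a single RS-block number. A small sketch of that iteration order, using the header's four bits per buffer and a made-up buffer count:

#include <stdio.h>

#define BUF_RS_BITS 4	/* DM_VERITY_FEC_BUF_RS_BITS */

int main(void)
{
	unsigned nbufs = 2;	/* pretend only two buffers were allocated */

	for (unsigned i = 0; i < nbufs; i++)
		for (unsigned j = 0; j < 1u << BUF_RS_BITS; j++)
			printf("buffer %u, slot %2u -> RS block %2u\n",
			       i, j, (i << BUF_RS_BITS) + j);
	return 0;
}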
+
+/*
+ * Decode all RS blocks from buffers and copy corrected bytes into fio->output
+ * starting from block_offset.
+ */
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+                          u64 rsb, int byte_index, unsigned block_offset,
+                          int neras)
+{
+       int r, corrected = 0, res;
+       struct dm_buffer *buf;
+       unsigned n, i, offset;
+       u8 *par, *block;
+
+       par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+       if (IS_ERR(par))
+               return PTR_ERR(par);
+
+       /*
+        * Decode the RS blocks we have in bufs. Each RS block results in
+        * one corrected target byte and consumes fec->roots parity bytes.
+        */
+       fec_for_each_buffer_rs_block(fio, n, i) {
+               block = fec_buffer_rs_block(v, fio, n, i);
+               res = fec_decode_rs8(v, fio, block, &par[offset], neras);
+               if (res < 0) {
+                       dm_bufio_release(buf);
+
+                       r = res;
+                       goto error;
+               }
+
+               corrected += res;
+               fio->output[block_offset] = block[byte_index];
+
+               block_offset++;
+               if (block_offset >= 1 << v->data_dev_block_bits)
+                       goto done;
+
+               /* read the next block when we run out of parity bytes */
+               offset += v->fec->roots;
+               if (offset >= 1 << v->data_dev_block_bits) {
+                       dm_bufio_release(buf);
+
+                       par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+                       if (unlikely(IS_ERR(par)))
+                               return PTR_ERR(par);
+               }
+       }
+done:
+       r = corrected;
+error:
+       if (r < 0 && neras)
+               DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
+                           v->data_dev->name, (unsigned long long)rsb, r);
+       else if (r > 0) {
+               DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
+                            v->data_dev->name, (unsigned long long)rsb, r);
+               atomic_add_unless(&v->fec->corrected, 1, INT_MAX);
+       }
+
+       return r;
+}
+
+/*
+ * Locate data block erasures using verity hashes.
+ */
+static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+                         u8 *want_digest, u8 *data)
+{
+       if (unlikely(verity_hash(v, verity_io_hash_desc(v, io),
+                                data, 1 << v->data_dev_block_bits,
+                                verity_io_real_digest(v, io))))
+               return 0;
+
+       return memcmp(verity_io_real_digest(v, io), want_digest,
+                     v->digest_size) != 0;
+}
+
+/*
+ * Read data blocks that are part of the RS block and deinterleave as much as
+ * fits into buffers. Check for erasure locations if @neras is non-NULL.
+ */
+static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+                        u64 rsb, u64 target, unsigned block_offset,
+                        int *neras)
+{
+       bool is_zero;
+       int i, j, target_index = -1;
+       struct dm_buffer *buf;
+       struct dm_bufio_client *bufio;
+       struct dm_verity_fec_io *fio = fec_io(io);
+       u64 block, ileaved;
+       u8 *bbuf, *rs_block;
+       u8 want_digest[v->digest_size];
+       unsigned n, k;
+
+       if (neras)
+               *neras = 0;
+
+       /*
+        * read each of the rsn data blocks that are part of the RS block, and
+        * interleave contents to available bufs
+        */
+       for (i = 0; i < v->fec->rsn; i++) {
+               ileaved = fec_interleave(v, rsb * v->fec->rsn + i);
+
+               /*
+                * target is the data block we want to correct, target_index is
+                * the index of this block within the rsn RS blocks
+                */
+               if (ileaved == target)
+                       target_index = i;
+
+               block = ileaved >> v->data_dev_block_bits;
+               bufio = v->fec->data_bufio;
+
+               if (block >= v->data_blocks) {
+                       block -= v->data_blocks;
+
+                       /*
+                        * blocks outside the area were assumed to contain
+                        * zeros when encoding data was generated
+                        */
+                       if (unlikely(block >= v->fec->hash_blocks))
+                               continue;
+
+                       block += v->hash_start;
+                       bufio = v->bufio;
+               }
+
+               bbuf = dm_bufio_read(bufio, block, &buf);
+               if (unlikely(IS_ERR(bbuf))) {
+                       DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
+                                    v->data_dev->name,
+                                    (unsigned long long)rsb,
+                                    (unsigned long long)block, PTR_ERR(bbuf));
+
+                       /* assume the block is corrupted */
+                       if (neras && *neras <= v->fec->roots)
+                               fio->erasures[(*neras)++] = i;
+
+                       continue;
+               }
+
+               /* locate erasures if the block is on the data device */
+               if (bufio == v->fec->data_bufio &&
+                   verity_hash_for_block(v, io, block, want_digest,
+                                         &is_zero) == 0) {
+                       /* skip known zero blocks entirely */
+                       if (is_zero)
+                               continue;
+
+                       /*
+                        * skip if we have already found the theoretical
+                        * maximum number (i.e. fec->roots) of erasures
+                        */
+                       if (neras && *neras <= v->fec->roots &&
+                           fec_is_erasure(v, io, want_digest, bbuf))
+                               fio->erasures[(*neras)++] = i;
+               }
+
+               /*
+                * deinterleave and copy the bytes that fit into bufs,
+                * starting from block_offset
+                */
+               fec_for_each_buffer_rs_block(fio, n, j) {
+                       k = fec_buffer_rs_index(n, j) + block_offset;
+
+                       if (k >= 1 << v->data_dev_block_bits)
+                               goto done;
+
+                       rs_block = fec_buffer_rs_block(v, fio, n, j);
+                       rs_block[i] = bbuf[k];
+               }
+done:
+               dm_bufio_release(buf);
+       }
+
+       return target_index;
+}
+
+/*
+ * Allocate RS control structure and FEC buffers from preallocated mempools,
+ * and attempt to allocate as many extra buffers as available.
+ */
+static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+       unsigned n;
+
+       if (!fio->rs) {
+               fio->rs = mempool_alloc(v->fec->rs_pool, 0);
+               if (unlikely(!fio->rs)) {
+                       DMERR("failed to allocate RS");
+                       return -ENOMEM;
+               }
+       }
+
+       fec_for_each_prealloc_buffer(n) {
+               if (fio->bufs[n])
+                       continue;
+
+               fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+               if (unlikely(!fio->bufs[n])) {
+                       DMERR("failed to allocate FEC buffer");
+                       return -ENOMEM;
+               }
+       }
+
+       /* try to allocate the maximum number of buffers */
+       fec_for_each_extra_buffer(fio, n) {
+               if (fio->bufs[n])
+                       continue;
+
+               fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+               /* we can manage with even one buffer if necessary */
+               if (unlikely(!fio->bufs[n]))
+                       break;
+       }
+       fio->nbufs = n;
+
+       if (!fio->output) {
+               fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
+
+               if (!fio->output) {
+                       DMERR("failed to allocate FEC page");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Initialize buffers and clear erasures. fec_read_bufs() assumes buffers are
+ * zeroed before deinterleaving.
+ */
+static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+{
+       unsigned n;
+
+       fec_for_each_buffer(fio, n)
+               memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
+
+       memset(fio->erasures, 0, sizeof(fio->erasures));
+}
+
+/*
+ * Decode all RS blocks in a single data block and return the target block
+ * (indicated by @offset) in fio->output. If @use_erasures is non-zero, uses
+ * hashes to locate erasures.
+ */
+static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+                         struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
+                         bool use_erasures)
+{
+       int r, neras = 0;
+       unsigned pos;
+
+       r = fec_alloc_bufs(v, fio);
+       if (unlikely(r < 0))
+               return r;
+
+       for (pos = 0; pos < 1 << v->data_dev_block_bits; ) {
+               fec_init_bufs(v, fio);
+
+               r = fec_read_bufs(v, io, rsb, offset, pos,
+                                 use_erasures ? &neras : NULL);
+               if (unlikely(r < 0))
+                       return r;
+
+               r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+               if (r < 0)
+                       return r;
+
+               pos += fio->nbufs << DM_VERITY_FEC_BUF_RS_BITS;
+       }
+
+       /* Always re-validate the corrected block against the expected hash */
+       r = verity_hash(v, verity_io_hash_desc(v, io), fio->output,
+                       1 << v->data_dev_block_bits,
+                       verity_io_real_digest(v, io));
+       if (unlikely(r < 0))
+               return r;
+
+       if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
+                  v->digest_size)) {
+               DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
+                           v->data_dev->name, (unsigned long long)rsb, neras);
+               return -EILSEQ;
+       }
+
+       return 0;
+}
+
+static int fec_bv_copy(struct dm_verity *v, struct dm_verity_io *io, u8 *data,
+                      size_t len)
+{
+       struct dm_verity_fec_io *fio = fec_io(io);
+
+       memcpy(data, &fio->output[fio->output_pos], len);
+       fio->output_pos += len;
+
+       return 0;
+}
+
+/*
+ * Correct errors in a block. Copies corrected block to dest if non-NULL,
+ * otherwise to a bio_vec starting from iter.
+ */
+int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+                     enum verity_block_type type, sector_t block, u8 *dest,
+                     struct bvec_iter *iter)
+{
+       int r;
+       struct dm_verity_fec_io *fio = fec_io(io);
+       u64 offset, res, rsb;
+
+       if (!verity_fec_is_enabled(v))
+               return -EOPNOTSUPP;
+
+       if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+               DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+               return -EIO;
+       }
+
+       fio->level++;
+
+       if (type == DM_VERITY_BLOCK_TYPE_METADATA)
+               block += v->data_blocks;
+
+       /*
+        * For RS(M, N), the continuous FEC data is divided into blocks of N
+        * bytes. Since block size may not be divisible by N, the last block
+        * is zero padded when decoding.
+        *
+        * Each byte of the block is covered by a different RS(M, N) code,
+        * and each code is interleaved over N blocks to make it less likely
+        * that bursty corruption will leave us in unrecoverable state.
+        */
+
+       offset = block << v->data_dev_block_bits;
+       res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
+
+       /*
+        * The base RS block we can feed to the interleaver to find out all
+        * blocks required for decoding.
+        */
+       rsb = offset - res * (v->fec->rounds << v->data_dev_block_bits);
+
+       /*
+        * Locating erasures is slow, so attempt to recover the block without
+        * them first. Do a second attempt with erasures if the corruption is
+        * bad enough.
+        */
+       r = fec_decode_rsb(v, io, fio, rsb, offset, false);
+       if (r < 0) {
+               r = fec_decode_rsb(v, io, fio, rsb, offset, true);
+               if (r < 0)
+                       goto done;
+       }
+
+       if (dest)
+               memcpy(dest, fio->output, 1 << v->data_dev_block_bits);
+       else if (iter) {
+               fio->output_pos = 0;
+               r = verity_for_bv_block(v, io, iter, fec_bv_copy);
+       }
+
+done:
+       fio->level--;
+       return r;
+}
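
The offset/res/rsb computation above amounts to reducing the byte offset of the block modulo rounds * block_size to obtain the base RS block mentioned in the comment. A stand-alone sketch with made-up parameters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t block_bits = 12;		/* 4 KiB data blocks */
	const uint64_t rounds = 3;		/* toy interleaving rounds */
	const uint64_t block = 10;		/* block we want to correct */

	uint64_t offset = block << block_bits;
	uint64_t span = rounds << block_bits;	/* rounds * block size */
	uint64_t res = offset / span;		/* what div64_u64() computes */
	uint64_t rsb = offset - res * span;	/* i.e. offset % span */

	printf("offset=%llu span=%llu res=%llu rsb=%llu\n",
	       (unsigned long long)offset, (unsigned long long)span,
	       (unsigned long long)res, (unsigned long long)rsb);
	return 0;
}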
+
+/*
+ * Clean up per-bio data.
+ */
+void verity_fec_finish_io(struct dm_verity_io *io)
+{
+       unsigned n;
+       struct dm_verity_fec *f = io->v->fec;
+       struct dm_verity_fec_io *fio = fec_io(io);
+
+       if (!verity_fec_is_enabled(io->v))
+               return;
+
+       mempool_free(fio->rs, f->rs_pool);
+
+       fec_for_each_prealloc_buffer(n)
+               mempool_free(fio->bufs[n], f->prealloc_pool);
+
+       fec_for_each_extra_buffer(fio, n)
+               mempool_free(fio->bufs[n], f->extra_pool);
+
+       mempool_free(fio->output, f->output_pool);
+}
+
+/*
+ * Initialize per-bio data.
+ */
+void verity_fec_init_io(struct dm_verity_io *io)
+{
+       struct dm_verity_fec_io *fio = fec_io(io);
+
+       if (!verity_fec_is_enabled(io->v))
+               return;
+
+       fio->rs = NULL;
+       memset(fio->bufs, 0, sizeof(fio->bufs));
+       fio->nbufs = 0;
+       fio->output = NULL;
+       fio->level = 0;
+}
+
+/*
+ * Append feature arguments and values to the status table.
+ */
+unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+                                char *result, unsigned maxlen)
+{
+       if (!verity_fec_is_enabled(v))
+               return sz;
+
+       DMEMIT(" " DM_VERITY_OPT_FEC_DEV " %s "
+              DM_VERITY_OPT_FEC_BLOCKS " %llu "
+              DM_VERITY_OPT_FEC_START " %llu "
+              DM_VERITY_OPT_FEC_ROOTS " %d",
+              v->fec->dev->name,
+              (unsigned long long)v->fec->blocks,
+              (unsigned long long)v->fec->start,
+              v->fec->roots);
+
+       return sz;
+}
+
+void verity_fec_dtr(struct dm_verity *v)
+{
+       struct dm_verity_fec *f = v->fec;
+       struct kobject *kobj = &f->kobj_holder.kobj;
+
+       if (!verity_fec_is_enabled(v))
+               goto out;
+
+       mempool_destroy(f->rs_pool);
+       mempool_destroy(f->prealloc_pool);
+       mempool_destroy(f->extra_pool);
+       kmem_cache_destroy(f->cache);
+
+       if (f->data_bufio)
+               dm_bufio_client_destroy(f->data_bufio);
+       if (f->bufio)
+               dm_bufio_client_destroy(f->bufio);
+
+       if (f->dev)
+               dm_put_device(v->ti, f->dev);
+
+       if (kobj->state_initialized) {
+               kobject_put(kobj);
+               wait_for_completion(dm_get_completion_from_kobject(kobj));
+       }
+
+out:
+       kfree(f);
+       v->fec = NULL;
+}
+
+static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data)
+{
+       struct dm_verity *v = (struct dm_verity *)pool_data;
+
+       return init_rs(8, 0x11d, 0, 1, v->fec->roots);
+}
+
+static void fec_rs_free(void *element, void *pool_data)
+{
+       struct rs_control *rs = (struct rs_control *)element;
+
+       if (rs)
+               free_rs(rs);
+}
+
+bool verity_is_fec_opt_arg(const char *arg_name)
+{
+       return (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV) ||
+               !strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS) ||
+               !strcasecmp(arg_name, DM_VERITY_OPT_FEC_START) ||
+               !strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS));
+}
+
+int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+                             unsigned *argc, const char *arg_name)
+{
+       int r;
+       struct dm_target *ti = v->ti;
+       const char *arg_value;
+       unsigned long long num_ll;
+       unsigned char num_c;
+       char dummy;
+
+       if (!*argc) {
+               ti->error = "FEC feature arguments require a value";
+               return -EINVAL;
+       }
+
+       arg_value = dm_shift_arg(as);
+       (*argc)--;
+
+       if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
+               r = dm_get_device(ti, arg_value, FMODE_READ, &v->fec->dev);
+               if (r) {
+                       ti->error = "FEC device lookup failed";
+                       return r;
+               }
+
+       } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_BLOCKS)) {
+               if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+                   ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+                    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+                       ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+                       return -EINVAL;
+               }
+               v->fec->blocks = num_ll;
+
+       } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_START)) {
+               if (sscanf(arg_value, "%llu%c", &num_ll, &dummy) != 1 ||
+                   ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >>
+                    (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll)) {
+                       ti->error = "Invalid " DM_VERITY_OPT_FEC_START;
+                       return -EINVAL;
+               }
+               v->fec->start = num_ll;
+
+       } else if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_ROOTS)) {
+               if (sscanf(arg_value, "%hhu%c", &num_c, &dummy) != 1 || !num_c ||
+                   num_c < (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MAX_RSN) ||
+                   num_c > (DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN)) {
+                       ti->error = "Invalid " DM_VERITY_OPT_FEC_ROOTS;
+                       return -EINVAL;
+               }
+               v->fec->roots = num_c;
+
+       } else {
+               ti->error = "Unrecognized verity FEC feature request";
+               return -EINVAL;
+       }
+
+       return 0;
+}
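
The shift-and-shift-back test used for fec_blocks and fec_start above detects overflow when converting a block count to 512-byte sectors: if the shifted value no longer fits into sector_t, shifting it back down does not reproduce the original number. A stand-alone sketch with a deliberately narrow 32-bit stand-in for sector_t:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t narrow_sector_t;	/* pretend sector_t is 32 bits wide */

static int fits(unsigned long long blocks, unsigned shift)
{
	/* same idiom as above: shift up into sectors, shift back, compare */
	return ((narrow_sector_t)(blocks << shift) >> shift) == blocks;
}

int main(void)
{
	unsigned shift = 3;	/* 4096-byte blocks: block bits 12 - SECTOR_SHIFT 9 */

	printf("%d\n", fits(0x1ffffffULL, shift));	/* 1: sector count fits */
	printf("%d\n", fits(0x20000000ULL, shift));	/* 0: sector count overflows */
	return 0;
}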
+
+static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr,
+                             char *buf)
+{
+       struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec,
+                                              kobj_holder.kobj);
+
+       return sprintf(buf, "%d\n", atomic_read(&f->corrected));
+}
+
+static struct kobj_attribute attr_corrected = __ATTR_RO(corrected);
+
+static struct attribute *fec_attrs[] = {
+       &attr_corrected.attr,
+       NULL
+};
+
+static struct kobj_type fec_ktype = {
+       .sysfs_ops = &kobj_sysfs_ops,
+       .default_attrs = fec_attrs,
+       .release = dm_kobject_release
+};
+
+/*
+ * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
+ */
+int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+       struct dm_verity_fec *f;
+
+       f = kzalloc(sizeof(struct dm_verity_fec), GFP_KERNEL);
+       if (!f) {
+               v->ti->error = "Cannot allocate FEC structure";
+               return -ENOMEM;
+       }
+       v->fec = f;
+
+       return 0;
+}
+
+/*
+ * Validate arguments and preallocate memory. Must be called after arguments
+ * have been parsed using verity_fec_parse_opt_args.
+ */
+int verity_fec_ctr(struct dm_verity *v)
+{
+       int r;
+       struct dm_verity_fec *f = v->fec;
+       struct dm_target *ti = v->ti;
+       struct mapped_device *md = dm_table_get_md(ti->table);
+       u64 hash_blocks;
+
+       if (!verity_fec_is_enabled(v)) {
+               verity_fec_dtr(v);
+               return 0;
+       }
+
+       /* Create a kobject and sysfs attributes */
+       init_completion(&f->kobj_holder.completion);
+
+       r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype,
+                                &disk_to_dev(dm_disk(md))->kobj, "%s", "fec");
+       if (r) {
+               ti->error = "Cannot create kobject";
+               return r;
+       }
+
+       /*
+        * FEC is computed over data blocks, possible metadata, and
+        * hash blocks. In other words, FEC covers total of fec_blocks
+        * blocks consisting of the following:
+        *
+        *  data blocks | hash blocks | metadata (optional)
+        *
+        * We allow metadata after hash blocks to support a use case
+        * where all data is stored on the same device and FEC covers
+        * the entire area.
+        *
+        * If metadata is included, we require it to be available on the
+        * hash device after the hash blocks.
+        */
+
+       hash_blocks = v->hash_blocks - v->hash_start;
+
+       /*
+        * Require matching block sizes for data and hash devices for
+        * simplicity.
+        */
+       if (v->data_dev_block_bits != v->hash_dev_block_bits) {
+               ti->error = "Block sizes must match to use FEC";
+               return -EINVAL;
+       }
+
+       if (!f->roots) {
+               ti->error = "Missing " DM_VERITY_OPT_FEC_ROOTS;
+               return -EINVAL;
+       }
+       f->rsn = DM_VERITY_FEC_RSM - f->roots;
+
+       if (!f->blocks) {
+               ti->error = "Missing " DM_VERITY_OPT_FEC_BLOCKS;
+               return -EINVAL;
+       }
+
+       f->rounds = f->blocks;
+       if (sector_div(f->rounds, f->rsn))
+               f->rounds++;
+
+       /*
+        * Due to optional metadata, f->blocks can be larger than
+        * data_blocks and hash_blocks combined.
+        */
+       if (f->blocks < v->data_blocks + hash_blocks || !f->rounds) {
+               ti->error = "Invalid " DM_VERITY_OPT_FEC_BLOCKS;
+               return -EINVAL;
+       }
+
+       /*
+        * Metadata is accessed through the hash device, so we require
+        * it to be large enough.
+        */
+       f->hash_blocks = f->blocks - v->data_blocks;
+       if (dm_bufio_get_device_size(v->bufio) < f->hash_blocks) {
+               ti->error = "Hash device is too small for "
+                       DM_VERITY_OPT_FEC_BLOCKS;
+               return -E2BIG;
+       }
+
+       f->bufio = dm_bufio_client_create(f->dev->bdev,
+                                         1 << v->data_dev_block_bits,
+                                         1, 0, NULL, NULL);
+       if (IS_ERR(f->bufio)) {
+               ti->error = "Cannot initialize FEC bufio client";
+               return PTR_ERR(f->bufio);
+       }
+
+       if (dm_bufio_get_device_size(f->bufio) <
+           ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+               ti->error = "FEC device is too small";
+               return -E2BIG;
+       }
+
+       f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
+                                              1 << v->data_dev_block_bits,
+                                              1, 0, NULL, NULL);
+       if (IS_ERR(f->data_bufio)) {
+               ti->error = "Cannot initialize FEC data bufio client";
+               return PTR_ERR(f->data_bufio);
+       }
+
+       if (dm_bufio_get_device_size(f->data_bufio) < v->data_blocks) {
+               ti->error = "Data device is too small";
+               return -E2BIG;
+       }
+
+       /* Preallocate an rs_control structure for each worker thread */
+       f->rs_pool = mempool_create(num_online_cpus(), fec_rs_alloc,
+                                   fec_rs_free, (void *) v);
+       if (!f->rs_pool) {
+               ti->error = "Cannot allocate RS pool";
+               return -ENOMEM;
+       }
+
+       f->cache = kmem_cache_create("dm_verity_fec_buffers",
+                                    f->rsn << DM_VERITY_FEC_BUF_RS_BITS,
+                                    0, 0, NULL);
+       if (!f->cache) {
+               ti->error = "Cannot create FEC buffer cache";
+               return -ENOMEM;
+       }
+
+       /* Preallocate DM_VERITY_FEC_BUF_PREALLOC buffers for each thread */
+       f->prealloc_pool = mempool_create_slab_pool(num_online_cpus() *
+                                                   DM_VERITY_FEC_BUF_PREALLOC,
+                                                   f->cache);
+       if (!f->prealloc_pool) {
+               ti->error = "Cannot allocate FEC buffer prealloc pool";
+               return -ENOMEM;
+       }
+
+       f->extra_pool = mempool_create_slab_pool(0, f->cache);
+       if (!f->extra_pool) {
+               ti->error = "Cannot allocate FEC buffer extra pool";
+               return -ENOMEM;
+       }
+
+       /* Preallocate an output buffer for each thread */
+       f->output_pool = mempool_create_kmalloc_pool(num_online_cpus(),
+                                                    1 << v->data_dev_block_bits);
+       if (!f->output_pool) {
+               ti->error = "Cannot allocate FEC output pool";
+               return -ENOMEM;
+       }
+
+       /* Reserve space for our per-bio data */
+       ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+
+       return 0;
+}
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
new file mode 100644 (file)
index 0000000..b8e21ce
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Sami Tolvanen <samitolvanen@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef DM_VERITY_FEC_H
+#define DM_VERITY_FEC_H
+
+#include "dm.h"
+#include "dm-verity.h"
+#include <linux/rslib.h>
+
+/* Reed-Solomon(M, N) parameters */
+#define DM_VERITY_FEC_RSM              255
+#define DM_VERITY_FEC_MAX_RSN          253
+#define DM_VERITY_FEC_MIN_RSN          231     /* ~10% space overhead */
+
+/* buffers for deinterleaving and decoding */
+#define DM_VERITY_FEC_BUF_PREALLOC     1       /* buffers to preallocate */
+#define DM_VERITY_FEC_BUF_RS_BITS      4       /* 1 << RS blocks per buffer */
+/* we need buffers for at most 1 << block size RS blocks */
+#define DM_VERITY_FEC_BUF_MAX \
+       (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
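
As a sketch of the arithmetic behind these constants, assuming 4 KiB pages and 4 KiB data blocks: the widest allowed code word RS(255, 231) carries 255 - 231 = 24 parity bytes per 231 data bytes, roughly the 10% overhead noted above; each buffer holds 1 << 4 = 16 RS blocks, and DM_VERITY_FEC_BUF_MAX works out to 1 << (12 - 4) = 256 buffers, so a full allocation covers 256 * 16 = 4096 RS blocks, one for every byte of a data block.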
+
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION    4
+
+#define DM_VERITY_OPT_FEC_DEV          "use_fec_from_device"
+#define DM_VERITY_OPT_FEC_BLOCKS       "fec_blocks"
+#define DM_VERITY_OPT_FEC_START                "fec_start"
+#define DM_VERITY_OPT_FEC_ROOTS                "fec_roots"
+
+/* configuration */
+struct dm_verity_fec {
+       struct dm_dev *dev;     /* parity data device */
+       struct dm_bufio_client *data_bufio;     /* for data dev access */
+       struct dm_bufio_client *bufio;          /* for parity data access */
+       sector_t start;         /* parity data start in blocks */
+       sector_t blocks;        /* number of blocks covered */
+       sector_t rounds;        /* number of interleaving rounds */
+       sector_t hash_blocks;   /* blocks covered after v->hash_start */
+       unsigned char roots;    /* number of parity bytes, M-N of RS(M, N) */
+       unsigned char rsn;      /* N of RS(M, N) */
+       mempool_t *rs_pool;     /* mempool for fio->rs */
+       mempool_t *prealloc_pool;       /* mempool for preallocated buffers */
+       mempool_t *extra_pool;  /* mempool for extra buffers */
+       mempool_t *output_pool; /* mempool for output */
+       struct kmem_cache *cache;       /* cache for buffers */
+       atomic_t corrected;             /* corrected errors */
+       struct dm_kobject_holder kobj_holder;   /* for sysfs attributes */
+};
+
+/* per-bio data */
+struct dm_verity_fec_io {
+       struct rs_control *rs;  /* Reed-Solomon state */
+       int erasures[DM_VERITY_FEC_MAX_RSN];    /* erasures for decode_rs8 */
+       u8 *bufs[DM_VERITY_FEC_BUF_MAX];        /* bufs for deinterleaving */
+       unsigned nbufs;         /* number of buffers allocated */
+       u8 *output;             /* buffer for corrected output */
+       size_t output_pos;
+       unsigned level;         /* recursion level */
+};
+
+#ifdef CONFIG_DM_VERITY_FEC
+
+/* each feature parameter requires a value */
+#define DM_VERITY_OPTS_FEC     8
+
+extern bool verity_fec_is_enabled(struct dm_verity *v);
+
+extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+                            enum verity_block_type type, sector_t block,
+                            u8 *dest, struct bvec_iter *iter);
+
+extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+                                       char *result, unsigned maxlen);
+
+extern void verity_fec_finish_io(struct dm_verity_io *io);
+extern void verity_fec_init_io(struct dm_verity_io *io);
+
+extern bool verity_is_fec_opt_arg(const char *arg_name);
+extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
+                                    struct dm_verity *v, unsigned *argc,
+                                    const char *arg_name);
+
+extern void verity_fec_dtr(struct dm_verity *v);
+
+extern int verity_fec_ctr_alloc(struct dm_verity *v);
+extern int verity_fec_ctr(struct dm_verity *v);
+
+#else /* !CONFIG_DM_VERITY_FEC */
+
+#define DM_VERITY_OPTS_FEC     0
+
+static inline bool verity_fec_is_enabled(struct dm_verity *v)
+{
+       return false;
+}
+
+static inline int verity_fec_decode(struct dm_verity *v,
+                                   struct dm_verity_io *io,
+                                   enum verity_block_type type,
+                                   sector_t block, u8 *dest,
+                                   struct bvec_iter *iter)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline unsigned verity_fec_status_table(struct dm_verity *v,
+                                              unsigned sz, char *result,
+                                              unsigned maxlen)
+{
+       return sz;
+}
+
+static inline void verity_fec_finish_io(struct dm_verity_io *io)
+{
+}
+
+static inline void verity_fec_init_io(struct dm_verity_io *io)
+{
+}
+
+static inline bool verity_is_fec_opt_arg(const char *arg_name)
+{
+       return false;
+}
+
+static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
+                                           struct dm_verity *v,
+                                           unsigned *argc,
+                                           const char *arg_name)
+{
+       return -EINVAL;
+}
+
+static inline void verity_fec_dtr(struct dm_verity *v)
+{
+}
+
+static inline int verity_fec_ctr_alloc(struct dm_verity *v)
+{
+       return 0;
+}
+
+static inline int verity_fec_ctr(struct dm_verity *v)
+{
+       return 0;
+}
+
+#endif /* CONFIG_DM_VERITY_FEC */
+
+#endif /* DM_VERITY_FEC_H */
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
new file mode 100644 (file)
index 0000000..9d3d4b2
--- /dev/null
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ *
+ * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
+ * default prefetch value. Data are read in "prefetch_cluster" chunks from the
+ * hash device. Setting this greatly improves performance when data and hash
+ * are on the same disk on different partitions on devices with poor random
+ * access behavior.
+ */
+
+#include "dm-verity.h"
+#include "dm-verity-fec.h"
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#define DM_MSG_PREFIX                  "verity"
+
+#define DM_VERITY_ENV_LENGTH           42
+#define DM_VERITY_ENV_VAR_NAME         "DM_VERITY_ERR_BLOCK_NR"
+
+#define DM_VERITY_DEFAULT_PREFETCH_SIZE        262144
+
+#define DM_VERITY_MAX_CORRUPTED_ERRS   100
+
+#define DM_VERITY_OPT_LOGGING          "ignore_corruption"
+#define DM_VERITY_OPT_RESTART          "restart_on_corruption"
+#define DM_VERITY_OPT_IGN_ZEROES       "ignore_zero_blocks"
+
+#define DM_VERITY_OPTS_MAX             (2 + DM_VERITY_OPTS_FEC)
+
+static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+
+module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+
+struct dm_verity_prefetch_work {
+       struct work_struct work;
+       struct dm_verity *v;
+       sector_t block;
+       unsigned n_blocks;
+};
+
+/*
+ * Auxiliary structure appended to each dm-bufio buffer. If the value
+ * hash_verified is nonzero, hash of the block has been verified.
+ *
+ * The variable hash_verified is set to 0 when allocating the buffer, then
+ * it can be changed to 1 and it is never reset to 0 again.
+ *
+ * There is no lock around this value, a race condition can at worst cause
+ * that multiple processes verify the hash of the same buffer simultaneously
+ * and write 1 to hash_verified simultaneously.
+ * This condition is harmless, so we don't need locking.
+ */
+struct buffer_aux {
+       int hash_verified;
+};
+
+/*
+ * Initialize struct buffer_aux for a freshly created buffer.
+ */
+static void dm_bufio_alloc_callback(struct dm_buffer *buf)
+{
+       struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
+       aux->hash_verified = 0;
+}
+
+/*
+ * Translate input sector number to the sector number on the target device.
+ */
+static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
+{
+       return v->data_start + dm_target_offset(v->ti, bi_sector);
+}
+
+/*
+ * Return hash position of a specified block at a specified tree level
+ * (0 is the lowest level).
+ * The lowest "hash_per_block_bits"-bits of the result denote hash position
+ * inside a hash block. The remaining bits denote location of the hash block.
+ */
+static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
+                                        int level)
+{
+       return block >> (level * v->hash_per_block_bits);
+}
+
+/*
+ * Wrapper for crypto_shash_init, which handles verity salting.
+ */
+static int verity_hash_init(struct dm_verity *v, struct shash_desc *desc)
+{
+       int r;
+
+       desc->tfm = v->tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       r = crypto_shash_init(desc);
+
+       if (unlikely(r < 0)) {
+               DMERR("crypto_shash_init failed: %d", r);
+               return r;
+       }
+
+       if (likely(v->version >= 1)) {
+               r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+               if (unlikely(r < 0)) {
+                       DMERR("crypto_shash_update failed: %d", r);
+                       return r;
+               }
+       }
+
+       return 0;
+}
+
+static int verity_hash_update(struct dm_verity *v, struct shash_desc *desc,
+                             const u8 *data, size_t len)
+{
+       int r = crypto_shash_update(desc, data, len);
+
+       if (unlikely(r < 0))
+               DMERR("crypto_shash_update failed: %d", r);
+
+       return r;
+}
+
+static int verity_hash_final(struct dm_verity *v, struct shash_desc *desc,
+                            u8 *digest)
+{
+       int r;
+
+       if (unlikely(!v->version)) {
+               r = crypto_shash_update(desc, v->salt, v->salt_size);
+
+               if (r < 0) {
+                       DMERR("crypto_shash_update failed: %d", r);
+                       return r;
+               }
+       }
+
+       r = crypto_shash_final(desc, digest);
+
+       if (unlikely(r < 0))
+               DMERR("crypto_shash_final failed: %d", r);
+
+       return r;
+}
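
Taken together, the helpers above determine where the salt enters the hash depending on the on-disk format version: for version >= 1 the salt is fed in right after init, giving digest = H(salt || data), while the legacy version 0 appends it just before finalization, giving digest = H(data || salt).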
+
+int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+               const u8 *data, size_t len, u8 *digest)
+{
+       int r;
+
+       r = verity_hash_init(v, desc);
+       if (unlikely(r < 0))
+               return r;
+
+       r = verity_hash_update(v, desc, data, len);
+       if (unlikely(r < 0))
+               return r;
+
+       return verity_hash_final(v, desc, digest);
+}
+
+static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
+                                sector_t *hash_block, unsigned *offset)
+{
+       sector_t position = verity_position_at_level(v, block, level);
+       unsigned idx;
+
+       *hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
+
+       if (!offset)
+               return;
+
+       idx = position & ((1 << v->hash_per_block_bits) - 1);
+       if (!v->version)
+               *offset = idx * v->digest_size;
+       else
+               *offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
+}
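
A stand-alone sketch of the position arithmetic above. The parameters are invented: 128 hashes per hash block (hash_per_block_bits = 7), a two-level tree, and arbitrary hash_level_block[] starting blocks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned hash_per_block_bits = 7;		/* 128 hashes per hash block */
	const uint64_t hash_level_block[2] = { 100, 4 };/* invented tree layout */
	const uint64_t block = 1000;			/* data block being verified */

	for (int level = 0; level < 2; level++) {
		uint64_t pos = block >> (level * hash_per_block_bits);
		uint64_t hash_block = hash_level_block[level] +
				      (pos >> hash_per_block_bits);
		unsigned idx = pos & ((1u << hash_per_block_bits) - 1);

		printf("level %d: hash block %llu, entry %u within it\n",
		       level, (unsigned long long)hash_block, idx);
	}
	return 0;
}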
+
+/*
+ * Handle verification errors.
+ */
+static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
+                            unsigned long long block)
+{
+       char verity_env[DM_VERITY_ENV_LENGTH];
+       char *envp[] = { verity_env, NULL };
+       const char *type_str = "";
+       struct mapped_device *md = dm_table_get_md(v->ti->table);
+
+       /* Corruption should be visible in device status in all modes */
+       v->hash_failed = 1;
+
+       if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
+               goto out;
+
+       v->corrupted_errs++;
+
+       switch (type) {
+       case DM_VERITY_BLOCK_TYPE_DATA:
+               type_str = "data";
+               break;
+       case DM_VERITY_BLOCK_TYPE_METADATA:
+               type_str = "metadata";
+               break;
+       default:
+               BUG();
+       }
+
+       DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
+               block);
+
+       if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
+               DMERR("%s: reached maximum errors", v->data_dev->name);
+
+       snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
+               DM_VERITY_ENV_VAR_NAME, type, block);
+
+       kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
+
+out:
+       if (v->mode == DM_VERITY_MODE_LOGGING)
+               return 0;
+
+       if (v->mode == DM_VERITY_MODE_RESTART)
+               kernel_restart("dm-verity device corrupted");
+
+       return 1;
+}
+
+/*
+ * Verify hash of a metadata block pertaining to the specified data block
+ * ("block" argument) at a specified level ("level" argument).
+ *
+ * On successful return, verity_io_want_digest(v, io) contains the hash value
+ * for a lower tree level or for the data block (if we're at the lowest level).
+ *
+ * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
+ * If "skip_unverified" is false, unverified buffer is hashed and verified
+ * against current value of verity_io_want_digest(v, io).
+ */
+static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+                              sector_t block, int level, bool skip_unverified,
+                              u8 *want_digest)
+{
+       struct dm_buffer *buf;
+       struct buffer_aux *aux;
+       u8 *data;
+       int r;
+       sector_t hash_block;
+       unsigned offset;
+
+       verity_hash_at_level(v, block, level, &hash_block, &offset);
+
+       data = dm_bufio_read(v->bufio, hash_block, &buf);
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       aux = dm_bufio_get_aux_data(buf);
+
+       if (!aux->hash_verified) {
+               if (skip_unverified) {
+                       r = 1;
+                       goto release_ret_r;
+               }
+
+               r = verity_hash(v, verity_io_hash_desc(v, io),
+                               data, 1 << v->hash_dev_block_bits,
+                               verity_io_real_digest(v, io));
+               if (unlikely(r < 0))
+                       goto release_ret_r;
+
+               if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
+                                 v->digest_size) == 0))
+                       aux->hash_verified = 1;
+               else if (verity_fec_decode(v, io,
+                                          DM_VERITY_BLOCK_TYPE_METADATA,
+                                          hash_block, data, NULL) == 0)
+                       aux->hash_verified = 1;
+               else if (verity_handle_err(v,
+                                          DM_VERITY_BLOCK_TYPE_METADATA,
+                                          hash_block)) {
+                       r = -EIO;
+                       goto release_ret_r;
+               }
+       }
+
+       data += offset;
+       memcpy(want_digest, data, v->digest_size);
+       r = 0;
+
+release_ret_r:
+       dm_bufio_release(buf);
+       return r;
+}
+
+/*
+ * Find a hash for a given block, write it to digest and verify the integrity
+ * of the hash tree if necessary.
+ */
+int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+                         sector_t block, u8 *digest, bool *is_zero)
+{
+       int r = 0, i;
+
+       if (likely(v->levels)) {
+               /*
+                * First, we try to get the requested hash for
+                * the current block. If the hash block itself is
+                * verified, zero is returned. If it isn't, this
+                * function returns 1 and we fall back to whole
+                * chain verification.
+                */
+               r = verity_verify_level(v, io, block, 0, true, digest);
+               if (likely(r <= 0))
+                       goto out;
+       }
+
+       memcpy(digest, v->root_digest, v->digest_size);
+
+       for (i = v->levels - 1; i >= 0; i--) {
+               r = verity_verify_level(v, io, block, i, false, digest);
+               if (unlikely(r))
+                       goto out;
+       }
+out:
+       if (!r && v->zero_digest)
+               *is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
+       else
+               *is_zero = false;
+
+       return r;
+}
+
+/*
+ * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
+ * starting from iter.
+ */
+int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+                       struct bvec_iter *iter,
+                       int (*process)(struct dm_verity *v,
+                                      struct dm_verity_io *io, u8 *data,
+                                      size_t len))
+{
+       unsigned todo = 1 << v->data_dev_block_bits;
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+       do {
+               int r;
+               u8 *page;
+               unsigned len;
+               struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+               page = kmap_atomic(bv.bv_page);
+               len = bv.bv_len;
+
+               if (likely(len >= todo))
+                       len = todo;
+
+               r = process(v, io, page + bv.bv_offset, len);
+               kunmap_atomic(page);
+
+               if (r < 0)
+                       return r;
+
+               bio_advance_iter(bio, iter, len);
+               todo -= len;
+       } while (todo);
+
+       return 0;
+}
+
+static int verity_bv_hash_update(struct dm_verity *v, struct dm_verity_io *io,
+                                u8 *data, size_t len)
+{
+       return verity_hash_update(v, verity_io_hash_desc(v, io), data, len);
+}
+
+static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+                         u8 *data, size_t len)
+{
+       memset(data, 0, len);
+       return 0;
+}
+
+/*
+ * Verify one "dm_verity_io" structure.
+ */
+static int verity_verify_io(struct dm_verity_io *io)
+{
+       bool is_zero;
+       struct dm_verity *v = io->v;
+       struct bvec_iter start;
+       unsigned b;
+
+       for (b = 0; b < io->n_blocks; b++) {
+               int r;
+               struct shash_desc *desc = verity_io_hash_desc(v, io);
+
+               r = verity_hash_for_block(v, io, io->block + b,
+                                         verity_io_want_digest(v, io),
+                                         &is_zero);
+               if (unlikely(r < 0))
+                       return r;
+
+               if (is_zero) {
+                       /*
+                        * If we expect a zero block, don't validate, just
+                        * return zeros.
+                        */
+                       r = verity_for_bv_block(v, io, &io->iter,
+                                               verity_bv_zero);
+                       if (unlikely(r < 0))
+                               return r;
+
+                       continue;
+               }
+
+               r = verity_hash_init(v, desc);
+               if (unlikely(r < 0))
+                       return r;
+
+               start = io->iter;
+               r = verity_for_bv_block(v, io, &io->iter, verity_bv_hash_update);
+               if (unlikely(r < 0))
+                       return r;
+
+               r = verity_hash_final(v, desc, verity_io_real_digest(v, io));
+               if (unlikely(r < 0))
+                       return r;
+
+               if (likely(memcmp(verity_io_real_digest(v, io),
+                                 verity_io_want_digest(v, io), v->digest_size) == 0))
+                       continue;
+               else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+                                          io->block + b, NULL, &start) == 0)
+                       continue;
+               else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
+                                          io->block + b))
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * End one "io" structure with a given error.
+ */
+static void verity_finish_io(struct dm_verity_io *io, int error)
+{
+       struct dm_verity *v = io->v;
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+       bio->bi_end_io = io->orig_bi_end_io;
+       bio->bi_error = error;
+
+       verity_fec_finish_io(io);
+
+       bio_endio(bio);
+}
+
+static void verity_work(struct work_struct *w)
+{
+       struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
+
+       verity_finish_io(io, verity_verify_io(io));
+}
+
+static void verity_end_io(struct bio *bio)
+{
+       struct dm_verity_io *io = bio->bi_private;
+
+       if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
+               verity_finish_io(io, bio->bi_error);
+               return;
+       }
+
+       INIT_WORK(&io->work, verity_work);
+       queue_work(io->v->verify_wq, &io->work);
+}
+
+/*
+ * Prefetch buffers for the specified io.
+ * The root buffer is not prefetched, it is assumed that it will be cached
+ * all the time.
+ */
+static void verity_prefetch_io(struct work_struct *work)
+{
+       struct dm_verity_prefetch_work *pw =
+               container_of(work, struct dm_verity_prefetch_work, work);
+       struct dm_verity *v = pw->v;
+       int i;
+
+       for (i = v->levels - 2; i >= 0; i--) {
+               sector_t hash_block_start;
+               sector_t hash_block_end;
+               verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+               verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+               if (!i) {
+                       unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+
+                       cluster >>= v->data_dev_block_bits;
+                       if (unlikely(!cluster))
+                               goto no_prefetch_cluster;
+
+                       if (unlikely(cluster & (cluster - 1)))
+                               cluster = 1 << __fls(cluster);
+
+                       hash_block_start &= ~(sector_t)(cluster - 1);
+                       hash_block_end |= cluster - 1;
+                       if (unlikely(hash_block_end >= v->hash_blocks))
+                               hash_block_end = v->hash_blocks - 1;
+               }
+no_prefetch_cluster:
+               dm_bufio_prefetch(v->bufio, hash_block_start,
+                                 hash_block_end - hash_block_start + 1);
+       }
+
+       kfree(pw);
+}
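
The level-0 handling in verity_prefetch_io() first rounds a non-power-of-two prefetch cluster down to a power of two and then widens the prefetch window to a cluster-aligned range. A stand-alone sketch of that rounding and masking; the cluster size and block numbers are made up, and the helper stands in for 1 << __fls():

#include <stdio.h>

/* round down to the highest power of two not exceeding x (x > 0) */
static unsigned round_down_pow2(unsigned x)
{
	unsigned p = 1;

	while (p <= x / 2)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned cluster = 24;			/* not a power of two */
	unsigned long long start = 1000, end = 1003;

	if (cluster & (cluster - 1))
		cluster = round_down_pow2(cluster);	/* -> 16 */

	start &= ~(unsigned long long)(cluster - 1);	/* -> 992 */
	end |= cluster - 1;				/* -> 1007 */

	printf("cluster=%u window=[%llu, %llu]\n", cluster, start, end);
	return 0;
}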
+
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+{
+       struct dm_verity_prefetch_work *pw;
+
+       pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
+               GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+       if (!pw)
+               return;
+
+       INIT_WORK(&pw->work, verity_prefetch_io);
+       pw->v = v;
+       pw->block = io->block;
+       pw->n_blocks = io->n_blocks;
+       queue_work(v->verify_wq, &pw->work);
+}
+
+/*
+ * Bio map function. It allocates dm_verity_io structure and bio vector and
+ * fills them. Then it issues prefetches and the I/O.
+ */
+int verity_map(struct dm_target *ti, struct bio *bio)
+{
+       struct dm_verity *v = ti->private;
+       struct dm_verity_io *io;
+
+       bio->bi_bdev = v->data_dev->bdev;
+       bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
+
+       if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+           ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
+               DMERR_LIMIT("unaligned io");
+               return -EIO;
+       }
+
+       if (bio_end_sector(bio) >>
+           (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
+               DMERR_LIMIT("io out of range");
+               return -EIO;
+       }
+
+       if (bio_data_dir(bio) == WRITE)
+               return -EIO;
+
+       io = dm_per_bio_data(bio, ti->per_bio_data_size);
+       io->v = v;
+       io->orig_bi_end_io = bio->bi_end_io;
+       io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+       io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
+
+       bio->bi_end_io = verity_end_io;
+       bio->bi_private = io;
+       io->iter = bio->bi_iter;
+
+       verity_fec_init_io(io);
+
+       verity_submit_prefetch(v, io);
+
+       generic_make_request(bio);
+
+       return DM_MAPIO_SUBMITTED;
+}
+EXPORT_SYMBOL_GPL(verity_map);
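
The alignment test in verity_map() rejects any bio whose start sector or sector count is not a multiple of the data block size. A stand-alone sketch with 4 KiB blocks, where block bits 12 minus SECTOR_SHIFT 9 gives a three-bit sector mask:

#include <stdio.h>

int main(void)
{
	const unsigned block_bits = 12, sector_shift = 9;
	const unsigned mask = (1u << (block_bits - sector_shift)) - 1;	/* 7 */

	unsigned sector = 16, nsectors = 8;	/* one aligned 4 KiB request */
	printf("aligned:   %s\n", ((sector | nsectors) & mask) ? "rejected" : "ok");

	sector = 18;				/* starts in the middle of a block */
	printf("unaligned: %s\n", ((sector | nsectors) & mask) ? "rejected" : "ok");
	return 0;
}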
+
+/*
+ * Status: V (valid) or C (corruption found)
+ */
+void verity_status(struct dm_target *ti, status_type_t type,
+                         unsigned status_flags, char *result, unsigned maxlen)
+{
+       struct dm_verity *v = ti->private;
+       unsigned args = 0;
+       unsigned sz = 0;
+       unsigned x;
+
+       switch (type) {
+       case STATUSTYPE_INFO:
+               DMEMIT("%c", v->hash_failed ? 'C' : 'V');
+               break;
+       case STATUSTYPE_TABLE:
+               DMEMIT("%u %s %s %u %u %llu %llu %s ",
+                       v->version,
+                       v->data_dev->name,
+                       v->hash_dev->name,
+                       1 << v->data_dev_block_bits,
+                       1 << v->hash_dev_block_bits,
+                       (unsigned long long)v->data_blocks,
+                       (unsigned long long)v->hash_start,
+                       v->alg_name
+                       );
+               for (x = 0; x < v->digest_size; x++)
+                       DMEMIT("%02x", v->root_digest[x]);
+               DMEMIT(" ");
+               if (!v->salt_size)
+                       DMEMIT("-");
+               else
+                       for (x = 0; x < v->salt_size; x++)
+                               DMEMIT("%02x", v->salt[x]);
+               if (v->mode != DM_VERITY_MODE_EIO)
+                       args++;
+               if (verity_fec_is_enabled(v))
+                       args += DM_VERITY_OPTS_FEC;
+               if (v->zero_digest)
+                       args++;
+               if (!args)
+                       return;
+               DMEMIT(" %u", args);
+               if (v->mode != DM_VERITY_MODE_EIO) {
+                       DMEMIT(" ");
+                       switch (v->mode) {
+                       case DM_VERITY_MODE_LOGGING:
+                               DMEMIT(DM_VERITY_OPT_LOGGING);
+                               break;
+                       case DM_VERITY_MODE_RESTART:
+                               DMEMIT(DM_VERITY_OPT_RESTART);
+                               break;
+                       default:
+                               BUG();
+                       }
+               }
+               if (v->zero_digest)
+                       DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+               sz = verity_fec_status_table(v, sz, result, maxlen);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(verity_status);
+
+int verity_prepare_ioctl(struct dm_target *ti,
+               struct block_device **bdev, fmode_t *mode)
+{
+       struct dm_verity *v = ti->private;
+
+       *bdev = v->data_dev->bdev;
+
+       if (v->data_start ||
+           ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+               return 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
+
+int verity_iterate_devices(struct dm_target *ti,
+                                 iterate_devices_callout_fn fn, void *data)
+{
+       struct dm_verity *v = ti->private;
+
+       return fn(ti, v->data_dev, v->data_start, ti->len, data);
+}
+EXPORT_SYMBOL_GPL(verity_iterate_devices);
+
+void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+       struct dm_verity *v = ti->private;
+
+       if (limits->logical_block_size < 1 << v->data_dev_block_bits)
+               limits->logical_block_size = 1 << v->data_dev_block_bits;
+
+       if (limits->physical_block_size < 1 << v->data_dev_block_bits)
+               limits->physical_block_size = 1 << v->data_dev_block_bits;
+
+       blk_limits_io_min(limits, limits->logical_block_size);
+}
+EXPORT_SYMBOL_GPL(verity_io_hints);
+
+void verity_dtr(struct dm_target *ti)
+{
+       struct dm_verity *v = ti->private;
+
+       if (v->verify_wq)
+               destroy_workqueue(v->verify_wq);
+
+       if (v->bufio)
+               dm_bufio_client_destroy(v->bufio);
+
+       kfree(v->salt);
+       kfree(v->root_digest);
+       kfree(v->zero_digest);
+
+       if (v->tfm)
+               crypto_free_shash(v->tfm);
+
+       kfree(v->alg_name);
+
+       if (v->hash_dev)
+               dm_put_device(ti, v->hash_dev);
+
+       if (v->data_dev)
+               dm_put_device(ti, v->data_dev);
+
+       verity_fec_dtr(v);
+
+       kfree(v);
+}
+EXPORT_SYMBOL_GPL(verity_dtr);
+
+static int verity_alloc_zero_digest(struct dm_verity *v)
+{
+       int r = -ENOMEM;
+       struct shash_desc *desc;
+       u8 *zero_data;
+
+       v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
+
+       if (!v->zero_digest)
+               return r;
+
+       desc = kmalloc(v->shash_descsize, GFP_KERNEL);
+
+       if (!desc)
+               return r; /* verity_dtr will free zero_digest */
+
+       zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
+
+       if (!zero_data)
+               goto out;
+
+       r = verity_hash(v, desc, zero_data, 1 << v->data_dev_block_bits,
+                       v->zero_digest);
+
+out:
+       kfree(desc);
+       kfree(zero_data);
+
+       return r;
+}
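The digest computed here backs the ignore_zero_blocks option; a minimal sketch of the comparison it enables, assuming the caller already holds the wanted digest for a block (the helper name and variable names are hypothetical, not part of this patch):

/* Hypothetical usage sketch only. */
static inline bool verity_block_is_zero(struct dm_verity *v,
					const u8 *want_digest)
{
	/* zero_digest is only allocated when ignore_zero_blocks was requested */
	return v->zero_digest &&
	       !memcmp(v->zero_digest, want_digest, v->digest_size);
}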
+
+static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
+{
+       int r;
+       unsigned argc;
+       struct dm_target *ti = v->ti;
+       const char *arg_name;
+
+       static struct dm_arg _args[] = {
+               {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
+       };
+
+       r = dm_read_arg_group(_args, as, &argc, &ti->error);
+       if (r)
+               return -EINVAL;
+
+       if (!argc)
+               return 0;
+
+       do {
+               arg_name = dm_shift_arg(as);
+               argc--;
+
+               if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
+                       v->mode = DM_VERITY_MODE_LOGGING;
+                       continue;
+
+               } else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
+                       v->mode = DM_VERITY_MODE_RESTART;
+                       continue;
+
+               } else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
+                       r = verity_alloc_zero_digest(v);
+                       if (r) {
+                               ti->error = "Cannot allocate zero digest";
+                               return r;
+                       }
+                       continue;
+
+               } else if (verity_is_fec_opt_arg(arg_name)) {
+                       r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
+                       if (r)
+                               return r;
+                       continue;
+               }
+
+               ti->error = "Unrecognized verity feature request";
+               return -EINVAL;
+       } while (argc && !r);
+
+       return r;
+}
+
+/*
+ * Target parameters:
+ *     <version>       The current format is version 1.
+ *                     Vsn 0 is compatible with original Chromium OS releases.
+ *     <data device>
+ *     <hash device>
+ *     <data block size>
+ *     <hash block size>
+ *     <the number of data blocks>
+ *     <hash start block>
+ *     <algorithm>
+ *     <digest>
+ *     <salt>          Hex string or "-" if no salt.
+ */
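An illustrative mapping table for these parameters follows; the device paths, sizes and optional arguments are made-up examples, and the digest and salt are left as placeholders rather than real values:

/*
 * Example (hypothetical devices and sizes):
 *
 *   dmsetup create vroot --readonly --table \
 *     "0 409600 verity 1 /dev/sda1 /dev/sda2 4096 4096 51200 1 sha256 \
 *      <root digest in hex> <salt in hex> 2 restart_on_corruption ignore_zero_blocks"
 *
 * 409600 512-byte sectors cover 51200 4 KiB data blocks; the trailing
 * "2 restart_on_corruption ignore_zero_blocks" is the optional argument
 * group consumed by verity_parse_opt_args() above.
 */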
+int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+       struct dm_verity *v;
+       struct dm_arg_set as;
+       unsigned int num;
+       unsigned long long num_ll;
+       int r;
+       int i;
+       sector_t hash_position;
+       char dummy;
+
+       v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
+       if (!v) {
+               ti->error = "Cannot allocate verity structure";
+               return -ENOMEM;
+       }
+       ti->private = v;
+       v->ti = ti;
+
+       r = verity_fec_ctr_alloc(v);
+       if (r)
+               goto bad;
+
+       if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
+               ti->error = "Device must be readonly";
+               r = -EINVAL;
+               goto bad;
+       }
+
+       if (argc < 10) {
+               ti->error = "Not enough arguments";
+               r = -EINVAL;
+               goto bad;
+       }
+
+       if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
+           num > 1) {
+               ti->error = "Invalid version";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->version = num;
+
+       r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
+       if (r) {
+               ti->error = "Data device lookup failed";
+               goto bad;
+       }
+
+       r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
+       if (r) {
+               ti->error = "Hash device lookup failed";
+               goto bad;
+       }
+
+       if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
+           !num || (num & (num - 1)) ||
+           num < bdev_logical_block_size(v->data_dev->bdev) ||
+           num > PAGE_SIZE) {
+               ti->error = "Invalid data device block size";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->data_dev_block_bits = __ffs(num);
+
+       if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
+           !num || (num & (num - 1)) ||
+           num < bdev_logical_block_size(v->hash_dev->bdev) ||
+           num > INT_MAX) {
+               ti->error = "Invalid hash device block size";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->hash_dev_block_bits = __ffs(num);
+
+       if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
+           (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+           >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+               ti->error = "Invalid data blocks";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->data_blocks = num_ll;
+
+       if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
+               ti->error = "Data device is too small";
+               r = -EINVAL;
+               goto bad;
+       }
+
+       if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
+           (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+           >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
+               ti->error = "Invalid hash start";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->hash_start = num_ll;
+
+       v->alg_name = kstrdup(argv[7], GFP_KERNEL);
+       if (!v->alg_name) {
+               ti->error = "Cannot allocate algorithm name";
+               r = -ENOMEM;
+               goto bad;
+       }
+
+       v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
+       if (IS_ERR(v->tfm)) {
+               ti->error = "Cannot initialize hash function";
+               r = PTR_ERR(v->tfm);
+               v->tfm = NULL;
+               goto bad;
+       }
+       v->digest_size = crypto_shash_digestsize(v->tfm);
+       if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
+               ti->error = "Digest size too big";
+               r = -EINVAL;
+               goto bad;
+       }
+       v->shash_descsize =
+               sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
+
+       v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
+       if (!v->root_digest) {
+               ti->error = "Cannot allocate root digest";
+               r = -ENOMEM;
+               goto bad;
+       }
+       if (strlen(argv[8]) != v->digest_size * 2 ||
+           hex2bin(v->root_digest, argv[8], v->digest_size)) {
+               ti->error = "Invalid root digest";
+               r = -EINVAL;
+               goto bad;
+       }
+
+       if (strcmp(argv[9], "-")) {
+               v->salt_size = strlen(argv[9]) / 2;
+               v->salt = kmalloc(v->salt_size, GFP_KERNEL);
+               if (!v->salt) {
+                       ti->error = "Cannot allocate salt";
+                       r = -ENOMEM;
+                       goto bad;
+               }
+               if (strlen(argv[9]) != v->salt_size * 2 ||
+                   hex2bin(v->salt, argv[9], v->salt_size)) {
+                       ti->error = "Invalid salt";
+                       r = -EINVAL;
+                       goto bad;
+               }
+       }
+
+       argv += 10;
+       argc -= 10;
+
+       /* Optional parameters */
+       if (argc) {
+               as.argc = argc;
+               as.argv = argv;
+
+               r = verity_parse_opt_args(&as, v);
+               if (r < 0)
+                       goto bad;
+       }
+
+       v->hash_per_block_bits =
+               __fls((1 << v->hash_dev_block_bits) / v->digest_size);
+
+       v->levels = 0;
+       if (v->data_blocks)
+               while (v->hash_per_block_bits * v->levels < 64 &&
+                      (unsigned long long)(v->data_blocks - 1) >>
+                      (v->hash_per_block_bits * v->levels))
+                       v->levels++;
+
+       if (v->levels > DM_VERITY_MAX_LEVELS) {
+               ti->error = "Too many tree levels";
+               r = -E2BIG;
+               goto bad;
+       }
+
+       hash_position = v->hash_start;
+       for (i = v->levels - 1; i >= 0; i--) {
+               sector_t s;
+               v->hash_level_block[i] = hash_position;
+               s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
+                                       >> ((i + 1) * v->hash_per_block_bits);
+               if (hash_position + s < hash_position) {
+                       ti->error = "Hash device offset overflow";
+                       r = -E2BIG;
+                       goto bad;
+               }
+               hash_position += s;
+       }
+       v->hash_blocks = hash_position;
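A worked example of the tree sizing just computed, using assumed (not patch-provided) numbers:

/*
 * Illustrative only: with 4 KiB hash blocks and a 32-byte digest there are
 * 128 hashes per hash block, so hash_per_block_bits = __fls(128) = 7.  For
 * data_blocks = 51200 the loop above gives levels = 3 (51199 >> 14 != 0 but
 * 51199 >> 21 == 0), and the per-level sizes come out as 1 block at level 2,
 * 4 blocks at level 1 and 400 blocks at level 0, i.e.
 * hash_blocks = hash_start + 405.
 */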
+
+       v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+               1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+               dm_bufio_alloc_callback, NULL);
+       if (IS_ERR(v->bufio)) {
+               ti->error = "Cannot initialize dm-bufio";
+               r = PTR_ERR(v->bufio);
+               v->bufio = NULL;
+               goto bad;
+       }
+
+       if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
+               ti->error = "Hash device is too small";
+               r = -E2BIG;
+               goto bad;
+       }
+
+       /* WQ_UNBOUND greatly improves performance when running on ramdisk */
+       v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
+       if (!v->verify_wq) {
+               ti->error = "Cannot allocate workqueue";
+               r = -ENOMEM;
+               goto bad;
+       }
+
+       ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+                               v->shash_descsize + v->digest_size * 2;
+
+       r = verity_fec_ctr(v);
+       if (r)
+               goto bad;
+
+       ti->per_bio_data_size = roundup(ti->per_bio_data_size,
+                                       __alignof__(struct dm_verity_io));
+
+       return 0;
+
+bad:
+       verity_dtr(ti);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(verity_ctr);
+
+static struct target_type verity_target = {
+       .name           = "verity",
+       .version        = {1, 3, 0},
+       .module         = THIS_MODULE,
+       .ctr            = verity_ctr,
+       .dtr            = verity_dtr,
+       .map            = verity_map,
+       .status         = verity_status,
+       .prepare_ioctl  = verity_prepare_ioctl,
+       .iterate_devices = verity_iterate_devices,
+       .io_hints       = verity_io_hints,
+};
+
+static int __init dm_verity_init(void)
+{
+       int r;
+
+       r = dm_register_target(&verity_target);
+       if (r < 0)
+               DMERR("register failed %d", r);
+
+       return r;
+}
+
+static void __exit dm_verity_exit(void)
+{
+       dm_unregister_target(&verity_target);
+}
+
+module_init(dm_verity_init);
+module_exit(dm_verity_exit);
+
+MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
+MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
+MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
+MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
deleted file mode 100644 (file)
index ccf4188..0000000
+++ /dev/null
@@ -1,995 +0,0 @@
-/*
- * Copyright (C) 2012 Red Hat, Inc.
- *
- * Author: Mikulas Patocka <mpatocka@redhat.com>
- *
- * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
- *
- * This file is released under the GPLv2.
- *
- * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
- * default prefetch value. Data are read in "prefetch_cluster" chunks from the
- * hash device. Setting this greatly improves performance when data and hash
- * are on the same disk on different partitions on devices with poor random
- * access behavior.
- */
-
-#include "dm-bufio.h"
-
-#include <linux/module.h>
-#include <linux/device-mapper.h>
-#include <linux/reboot.h>
-#include <crypto/hash.h>
-
-#define DM_MSG_PREFIX                  "verity"
-
-#define DM_VERITY_ENV_LENGTH           42
-#define DM_VERITY_ENV_VAR_NAME         "DM_VERITY_ERR_BLOCK_NR"
-
-#define DM_VERITY_DEFAULT_PREFETCH_SIZE        262144
-
-#define DM_VERITY_MAX_LEVELS           63
-#define DM_VERITY_MAX_CORRUPTED_ERRS   100
-
-#define DM_VERITY_OPT_LOGGING          "ignore_corruption"
-#define DM_VERITY_OPT_RESTART          "restart_on_corruption"
-
-static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
-
-module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
-
-enum verity_mode {
-       DM_VERITY_MODE_EIO,
-       DM_VERITY_MODE_LOGGING,
-       DM_VERITY_MODE_RESTART
-};
-
-enum verity_block_type {
-       DM_VERITY_BLOCK_TYPE_DATA,
-       DM_VERITY_BLOCK_TYPE_METADATA
-};
-
-struct dm_verity {
-       struct dm_dev *data_dev;
-       struct dm_dev *hash_dev;
-       struct dm_target *ti;
-       struct dm_bufio_client *bufio;
-       char *alg_name;
-       struct crypto_shash *tfm;
-       u8 *root_digest;        /* digest of the root block */
-       u8 *salt;               /* salt: its size is salt_size */
-       unsigned salt_size;
-       sector_t data_start;    /* data offset in 512-byte sectors */
-       sector_t hash_start;    /* hash start in blocks */
-       sector_t data_blocks;   /* the number of data blocks */
-       sector_t hash_blocks;   /* the number of hash blocks */
-       unsigned char data_dev_block_bits;      /* log2(data blocksize) */
-       unsigned char hash_dev_block_bits;      /* log2(hash blocksize) */
-       unsigned char hash_per_block_bits;      /* log2(hashes in hash block) */
-       unsigned char levels;   /* the number of tree levels */
-       unsigned char version;
-       unsigned digest_size;   /* digest size for the current hash algorithm */
-       unsigned shash_descsize;/* the size of temporary space for crypto */
-       int hash_failed;        /* set to 1 if hash of any block failed */
-       enum verity_mode mode;  /* mode for handling verification errors */
-       unsigned corrupted_errs;/* Number of errors for corrupted blocks */
-
-       struct workqueue_struct *verify_wq;
-
-       /* starting blocks for each tree level. 0 is the lowest level. */
-       sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
-};
-
-struct dm_verity_io {
-       struct dm_verity *v;
-
-       /* original values of bio->bi_end_io and bio->bi_private */
-       bio_end_io_t *orig_bi_end_io;
-       void *orig_bi_private;
-
-       sector_t block;
-       unsigned n_blocks;
-
-       struct bvec_iter iter;
-
-       struct work_struct work;
-
-       /*
-        * Three variably-sized fields follow this struct:
-        *
-        * u8 hash_desc[v->shash_descsize];
-        * u8 real_digest[v->digest_size];
-        * u8 want_digest[v->digest_size];
-        *
-        * To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
-        */
-};
-
-struct dm_verity_prefetch_work {
-       struct work_struct work;
-       struct dm_verity *v;
-       sector_t block;
-       unsigned n_blocks;
-};
-
-static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
-{
-       return (struct shash_desc *)(io + 1);
-}
-
-static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
-{
-       return (u8 *)(io + 1) + v->shash_descsize;
-}
-
-static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
-{
-       return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
-}
-
-/*
- * Auxiliary structure appended to each dm-bufio buffer. If the value
- * hash_verified is nonzero, hash of the block has been verified.
- *
- * The variable hash_verified is set to 0 when allocating the buffer, then
- * it can be changed to 1 and it is never reset to 0 again.
- *
- * There is no lock around this value, a race condition can at worst cause
- * that multiple processes verify the hash of the same buffer simultaneously
- * and write 1 to hash_verified simultaneously.
- * This condition is harmless, so we don't need locking.
- */
-struct buffer_aux {
-       int hash_verified;
-};
-
-/*
- * Initialize struct buffer_aux for a freshly created buffer.
- */
-static void dm_bufio_alloc_callback(struct dm_buffer *buf)
-{
-       struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
-
-       aux->hash_verified = 0;
-}
-
-/*
- * Translate input sector number to the sector number on the target device.
- */
-static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
-{
-       return v->data_start + dm_target_offset(v->ti, bi_sector);
-}
-
-/*
- * Return hash position of a specified block at a specified tree level
- * (0 is the lowest level).
- * The lowest "hash_per_block_bits"-bits of the result denote hash position
- * inside a hash block. The remaining bits denote location of the hash block.
- */
-static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
-                                        int level)
-{
-       return block >> (level * v->hash_per_block_bits);
-}
-
-static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
-                                sector_t *hash_block, unsigned *offset)
-{
-       sector_t position = verity_position_at_level(v, block, level);
-       unsigned idx;
-
-       *hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
-
-       if (!offset)
-               return;
-
-       idx = position & ((1 << v->hash_per_block_bits) - 1);
-       if (!v->version)
-               *offset = idx * v->digest_size;
-       else
-               *offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
-}
-
-/*
- * Handle verification errors.
- */
-static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
-                            unsigned long long block)
-{
-       char verity_env[DM_VERITY_ENV_LENGTH];
-       char *envp[] = { verity_env, NULL };
-       const char *type_str = "";
-       struct mapped_device *md = dm_table_get_md(v->ti->table);
-
-       /* Corruption should be visible in device status in all modes */
-       v->hash_failed = 1;
-
-       if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
-               goto out;
-
-       v->corrupted_errs++;
-
-       switch (type) {
-       case DM_VERITY_BLOCK_TYPE_DATA:
-               type_str = "data";
-               break;
-       case DM_VERITY_BLOCK_TYPE_METADATA:
-               type_str = "metadata";
-               break;
-       default:
-               BUG();
-       }
-
-       DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
-               block);
-
-       if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
-               DMERR("%s: reached maximum errors", v->data_dev->name);
-
-       snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
-               DM_VERITY_ENV_VAR_NAME, type, block);
-
-       kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
-
-out:
-       if (v->mode == DM_VERITY_MODE_LOGGING)
-               return 0;
-
-       if (v->mode == DM_VERITY_MODE_RESTART)
-               kernel_restart("dm-verity device corrupted");
-
-       return 1;
-}
-
-/*
- * Verify hash of a metadata block pertaining to the specified data block
- * ("block" argument) at a specified level ("level" argument).
- *
- * On successful return, io_want_digest(v, io) contains the hash value for
- * a lower tree level or for the data block (if we're at the lowest level).
- *
- * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
- * If "skip_unverified" is false, unverified buffer is hashed and verified
- * against current value of io_want_digest(v, io).
- */
-static int verity_verify_level(struct dm_verity_io *io, sector_t block,
-                              int level, bool skip_unverified)
-{
-       struct dm_verity *v = io->v;
-       struct dm_buffer *buf;
-       struct buffer_aux *aux;
-       u8 *data;
-       int r;
-       sector_t hash_block;
-       unsigned offset;
-
-       verity_hash_at_level(v, block, level, &hash_block, &offset);
-
-       data = dm_bufio_read(v->bufio, hash_block, &buf);
-       if (IS_ERR(data))
-               return PTR_ERR(data);
-
-       aux = dm_bufio_get_aux_data(buf);
-
-       if (!aux->hash_verified) {
-               struct shash_desc *desc;
-               u8 *result;
-
-               if (skip_unverified) {
-                       r = 1;
-                       goto release_ret_r;
-               }
-
-               desc = io_hash_desc(v, io);
-               desc->tfm = v->tfm;
-               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-               r = crypto_shash_init(desc);
-               if (r < 0) {
-                       DMERR("crypto_shash_init failed: %d", r);
-                       goto release_ret_r;
-               }
-
-               if (likely(v->version >= 1)) {
-                       r = crypto_shash_update(desc, v->salt, v->salt_size);
-                       if (r < 0) {
-                               DMERR("crypto_shash_update failed: %d", r);
-                               goto release_ret_r;
-                       }
-               }
-
-               r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
-               if (r < 0) {
-                       DMERR("crypto_shash_update failed: %d", r);
-                       goto release_ret_r;
-               }
-
-               if (!v->version) {
-                       r = crypto_shash_update(desc, v->salt, v->salt_size);
-                       if (r < 0) {
-                               DMERR("crypto_shash_update failed: %d", r);
-                               goto release_ret_r;
-                       }
-               }
-
-               result = io_real_digest(v, io);
-               r = crypto_shash_final(desc, result);
-               if (r < 0) {
-                       DMERR("crypto_shash_final failed: %d", r);
-                       goto release_ret_r;
-               }
-               if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
-                       if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA,
-                                             hash_block)) {
-                               r = -EIO;
-                               goto release_ret_r;
-                       }
-               } else
-                       aux->hash_verified = 1;
-       }
-
-       data += offset;
-
-       memcpy(io_want_digest(v, io), data, v->digest_size);
-
-       dm_bufio_release(buf);
-       return 0;
-
-release_ret_r:
-       dm_bufio_release(buf);
-
-       return r;
-}
-
-/*
- * Verify one "dm_verity_io" structure.
- */
-static int verity_verify_io(struct dm_verity_io *io)
-{
-       struct dm_verity *v = io->v;
-       struct bio *bio = dm_bio_from_per_bio_data(io,
-                                                  v->ti->per_bio_data_size);
-       unsigned b;
-       int i;
-
-       for (b = 0; b < io->n_blocks; b++) {
-               struct shash_desc *desc;
-               u8 *result;
-               int r;
-               unsigned todo;
-
-               if (likely(v->levels)) {
-                       /*
-                        * First, we try to get the requested hash for
-                        * the current block. If the hash block itself is
-                        * verified, zero is returned. If it isn't, this
-                        * function returns 0 and we fall back to whole
-                        * function returns 1 and we fall back to whole
-                        */
-                       int r = verity_verify_level(io, io->block + b, 0, true);
-                       if (likely(!r))
-                               goto test_block_hash;
-                       if (r < 0)
-                               return r;
-               }
-
-               memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);
-
-               for (i = v->levels - 1; i >= 0; i--) {
-                       int r = verity_verify_level(io, io->block + b, i, false);
-                       if (unlikely(r))
-                               return r;
-               }
-
-test_block_hash:
-               desc = io_hash_desc(v, io);
-               desc->tfm = v->tfm;
-               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-               r = crypto_shash_init(desc);
-               if (r < 0) {
-                       DMERR("crypto_shash_init failed: %d", r);
-                       return r;
-               }
-
-               if (likely(v->version >= 1)) {
-                       r = crypto_shash_update(desc, v->salt, v->salt_size);
-                       if (r < 0) {
-                               DMERR("crypto_shash_update failed: %d", r);
-                               return r;
-                       }
-               }
-               todo = 1 << v->data_dev_block_bits;
-               do {
-                       u8 *page;
-                       unsigned len;
-                       struct bio_vec bv = bio_iter_iovec(bio, io->iter);
-
-                       page = kmap_atomic(bv.bv_page);
-                       len = bv.bv_len;
-                       if (likely(len >= todo))
-                               len = todo;
-                       r = crypto_shash_update(desc, page + bv.bv_offset, len);
-                       kunmap_atomic(page);
-
-                       if (r < 0) {
-                               DMERR("crypto_shash_update failed: %d", r);
-                               return r;
-                       }
-
-                       bio_advance_iter(bio, &io->iter, len);
-                       todo -= len;
-               } while (todo);
-
-               if (!v->version) {
-                       r = crypto_shash_update(desc, v->salt, v->salt_size);
-                       if (r < 0) {
-                               DMERR("crypto_shash_update failed: %d", r);
-                               return r;
-                       }
-               }
-
-               result = io_real_digest(v, io);
-               r = crypto_shash_final(desc, result);
-               if (r < 0) {
-                       DMERR("crypto_shash_final failed: %d", r);
-                       return r;
-               }
-               if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
-                       if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
-                                             io->block + b))
-                               return -EIO;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * End one "io" structure with a given error.
- */
-static void verity_finish_io(struct dm_verity_io *io, int error)
-{
-       struct dm_verity *v = io->v;
-       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
-
-       bio->bi_end_io = io->orig_bi_end_io;
-       bio->bi_private = io->orig_bi_private;
-       bio->bi_error = error;
-
-       bio_endio(bio);
-}
-
-static void verity_work(struct work_struct *w)
-{
-       struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
-
-       verity_finish_io(io, verity_verify_io(io));
-}
-
-static void verity_end_io(struct bio *bio)
-{
-       struct dm_verity_io *io = bio->bi_private;
-
-       if (bio->bi_error) {
-               verity_finish_io(io, bio->bi_error);
-               return;
-       }
-
-       INIT_WORK(&io->work, verity_work);
-       queue_work(io->v->verify_wq, &io->work);
-}
-
-/*
- * Prefetch buffers for the specified io.
- * The root buffer is not prefetched, it is assumed that it will be cached
- * all the time.
- */
-static void verity_prefetch_io(struct work_struct *work)
-{
-       struct dm_verity_prefetch_work *pw =
-               container_of(work, struct dm_verity_prefetch_work, work);
-       struct dm_verity *v = pw->v;
-       int i;
-
-       for (i = v->levels - 2; i >= 0; i--) {
-               sector_t hash_block_start;
-               sector_t hash_block_end;
-               verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
-               verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
-               if (!i) {
-                       unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
-
-                       cluster >>= v->data_dev_block_bits;
-                       if (unlikely(!cluster))
-                               goto no_prefetch_cluster;
-
-                       if (unlikely(cluster & (cluster - 1)))
-                               cluster = 1 << __fls(cluster);
-
-                       hash_block_start &= ~(sector_t)(cluster - 1);
-                       hash_block_end |= cluster - 1;
-                       if (unlikely(hash_block_end >= v->hash_blocks))
-                               hash_block_end = v->hash_blocks - 1;
-               }
-no_prefetch_cluster:
-               dm_bufio_prefetch(v->bufio, hash_block_start,
-                                 hash_block_end - hash_block_start + 1);
-       }
-
-       kfree(pw);
-}
-
-static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
-{
-       struct dm_verity_prefetch_work *pw;
-
-       pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
-               GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
-
-       if (!pw)
-               return;
-
-       INIT_WORK(&pw->work, verity_prefetch_io);
-       pw->v = v;
-       pw->block = io->block;
-       pw->n_blocks = io->n_blocks;
-       queue_work(v->verify_wq, &pw->work);
-}
-
-/*
- * Bio map function. It allocates dm_verity_io structure and bio vector and
- * fills them. Then it issues prefetches and the I/O.
- */
-static int verity_map(struct dm_target *ti, struct bio *bio)
-{
-       struct dm_verity *v = ti->private;
-       struct dm_verity_io *io;
-
-       bio->bi_bdev = v->data_dev->bdev;
-       bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
-
-       if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
-           ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
-               DMERR_LIMIT("unaligned io");
-               return -EIO;
-       }
-
-       if (bio_end_sector(bio) >>
-           (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
-               DMERR_LIMIT("io out of range");
-               return -EIO;
-       }
-
-       if (bio_data_dir(bio) == WRITE)
-               return -EIO;
-
-       io = dm_per_bio_data(bio, ti->per_bio_data_size);
-       io->v = v;
-       io->orig_bi_end_io = bio->bi_end_io;
-       io->orig_bi_private = bio->bi_private;
-       io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-       io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
-
-       bio->bi_end_io = verity_end_io;
-       bio->bi_private = io;
-       io->iter = bio->bi_iter;
-
-       verity_submit_prefetch(v, io);
-
-       generic_make_request(bio);
-
-       return DM_MAPIO_SUBMITTED;
-}
-
-/*
- * Status: V (valid) or C (corruption found)
- */
-static void verity_status(struct dm_target *ti, status_type_t type,
-                         unsigned status_flags, char *result, unsigned maxlen)
-{
-       struct dm_verity *v = ti->private;
-       unsigned sz = 0;
-       unsigned x;
-
-       switch (type) {
-       case STATUSTYPE_INFO:
-               DMEMIT("%c", v->hash_failed ? 'C' : 'V');
-               break;
-       case STATUSTYPE_TABLE:
-               DMEMIT("%u %s %s %u %u %llu %llu %s ",
-                       v->version,
-                       v->data_dev->name,
-                       v->hash_dev->name,
-                       1 << v->data_dev_block_bits,
-                       1 << v->hash_dev_block_bits,
-                       (unsigned long long)v->data_blocks,
-                       (unsigned long long)v->hash_start,
-                       v->alg_name
-                       );
-               for (x = 0; x < v->digest_size; x++)
-                       DMEMIT("%02x", v->root_digest[x]);
-               DMEMIT(" ");
-               if (!v->salt_size)
-                       DMEMIT("-");
-               else
-                       for (x = 0; x < v->salt_size; x++)
-                               DMEMIT("%02x", v->salt[x]);
-               if (v->mode != DM_VERITY_MODE_EIO) {
-                       DMEMIT(" 1 ");
-                       switch (v->mode) {
-                       case DM_VERITY_MODE_LOGGING:
-                               DMEMIT(DM_VERITY_OPT_LOGGING);
-                               break;
-                       case DM_VERITY_MODE_RESTART:
-                               DMEMIT(DM_VERITY_OPT_RESTART);
-                               break;
-                       default:
-                               BUG();
-                       }
-               }
-               break;
-       }
-}
-
-static int verity_prepare_ioctl(struct dm_target *ti,
-               struct block_device **bdev, fmode_t *mode)
-{
-       struct dm_verity *v = ti->private;
-
-       *bdev = v->data_dev->bdev;
-
-       if (v->data_start ||
-           ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
-               return 1;
-       return 0;
-}
-
-static int verity_iterate_devices(struct dm_target *ti,
-                                 iterate_devices_callout_fn fn, void *data)
-{
-       struct dm_verity *v = ti->private;
-
-       return fn(ti, v->data_dev, v->data_start, ti->len, data);
-}
-
-static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
-       struct dm_verity *v = ti->private;
-
-       if (limits->logical_block_size < 1 << v->data_dev_block_bits)
-               limits->logical_block_size = 1 << v->data_dev_block_bits;
-
-       if (limits->physical_block_size < 1 << v->data_dev_block_bits)
-               limits->physical_block_size = 1 << v->data_dev_block_bits;
-
-       blk_limits_io_min(limits, limits->logical_block_size);
-}
-
-static void verity_dtr(struct dm_target *ti)
-{
-       struct dm_verity *v = ti->private;
-
-       if (v->verify_wq)
-               destroy_workqueue(v->verify_wq);
-
-       if (v->bufio)
-               dm_bufio_client_destroy(v->bufio);
-
-       kfree(v->salt);
-       kfree(v->root_digest);
-
-       if (v->tfm)
-               crypto_free_shash(v->tfm);
-
-       kfree(v->alg_name);
-
-       if (v->hash_dev)
-               dm_put_device(ti, v->hash_dev);
-
-       if (v->data_dev)
-               dm_put_device(ti, v->data_dev);
-
-       kfree(v);
-}
-
-/*
- * Target parameters:
- *     <version>       The current format is version 1.
- *                     Vsn 0 is compatible with original Chromium OS releases.
- *     <data device>
- *     <hash device>
- *     <data block size>
- *     <hash block size>
- *     <the number of data blocks>
- *     <hash start block>
- *     <algorithm>
- *     <digest>
- *     <salt>          Hex string or "-" if no salt.
- */
-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
-{
-       struct dm_verity *v;
-       struct dm_arg_set as;
-       const char *opt_string;
-       unsigned int num, opt_params;
-       unsigned long long num_ll;
-       int r;
-       int i;
-       sector_t hash_position;
-       char dummy;
-
-       static struct dm_arg _args[] = {
-               {0, 1, "Invalid number of feature args"},
-       };
-
-       v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
-       if (!v) {
-               ti->error = "Cannot allocate verity structure";
-               return -ENOMEM;
-       }
-       ti->private = v;
-       v->ti = ti;
-
-       if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
-               ti->error = "Device must be readonly";
-               r = -EINVAL;
-               goto bad;
-       }
-
-       if (argc < 10) {
-               ti->error = "Not enough arguments";
-               r = -EINVAL;
-               goto bad;
-       }
-
-       if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
-           num > 1) {
-               ti->error = "Invalid version";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->version = num;
-
-       r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
-       if (r) {
-               ti->error = "Data device lookup failed";
-               goto bad;
-       }
-
-       r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
-       if (r) {
-               ti->error = "Hash device lookup failed";
-               goto bad;
-       }
-
-       if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
-           !num || (num & (num - 1)) ||
-           num < bdev_logical_block_size(v->data_dev->bdev) ||
-           num > PAGE_SIZE) {
-               ti->error = "Invalid data device block size";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->data_dev_block_bits = __ffs(num);
-
-       if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
-           !num || (num & (num - 1)) ||
-           num < bdev_logical_block_size(v->hash_dev->bdev) ||
-           num > INT_MAX) {
-               ti->error = "Invalid hash device block size";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->hash_dev_block_bits = __ffs(num);
-
-       if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
-           (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
-           >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
-               ti->error = "Invalid data blocks";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->data_blocks = num_ll;
-
-       if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
-               ti->error = "Data device is too small";
-               r = -EINVAL;
-               goto bad;
-       }
-
-       if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
-           (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
-           >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
-               ti->error = "Invalid hash start";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->hash_start = num_ll;
-
-       v->alg_name = kstrdup(argv[7], GFP_KERNEL);
-       if (!v->alg_name) {
-               ti->error = "Cannot allocate algorithm name";
-               r = -ENOMEM;
-               goto bad;
-       }
-
-       v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
-       if (IS_ERR(v->tfm)) {
-               ti->error = "Cannot initialize hash function";
-               r = PTR_ERR(v->tfm);
-               v->tfm = NULL;
-               goto bad;
-       }
-       v->digest_size = crypto_shash_digestsize(v->tfm);
-       if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
-               ti->error = "Digest size too big";
-               r = -EINVAL;
-               goto bad;
-       }
-       v->shash_descsize =
-               sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);
-
-       v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
-       if (!v->root_digest) {
-               ti->error = "Cannot allocate root digest";
-               r = -ENOMEM;
-               goto bad;
-       }
-       if (strlen(argv[8]) != v->digest_size * 2 ||
-           hex2bin(v->root_digest, argv[8], v->digest_size)) {
-               ti->error = "Invalid root digest";
-               r = -EINVAL;
-               goto bad;
-       }
-
-       if (strcmp(argv[9], "-")) {
-               v->salt_size = strlen(argv[9]) / 2;
-               v->salt = kmalloc(v->salt_size, GFP_KERNEL);
-               if (!v->salt) {
-                       ti->error = "Cannot allocate salt";
-                       r = -ENOMEM;
-                       goto bad;
-               }
-               if (strlen(argv[9]) != v->salt_size * 2 ||
-                   hex2bin(v->salt, argv[9], v->salt_size)) {
-                       ti->error = "Invalid salt";
-                       r = -EINVAL;
-                       goto bad;
-               }
-       }
-
-       argv += 10;
-       argc -= 10;
-
-       /* Optional parameters */
-       if (argc) {
-               as.argc = argc;
-               as.argv = argv;
-
-               r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
-               if (r)
-                       goto bad;
-
-               while (opt_params) {
-                       opt_params--;
-                       opt_string = dm_shift_arg(&as);
-                       if (!opt_string) {
-                               ti->error = "Not enough feature arguments";
-                               r = -EINVAL;
-                               goto bad;
-                       }
-
-                       if (!strcasecmp(opt_string, DM_VERITY_OPT_LOGGING))
-                               v->mode = DM_VERITY_MODE_LOGGING;
-                       else if (!strcasecmp(opt_string, DM_VERITY_OPT_RESTART))
-                               v->mode = DM_VERITY_MODE_RESTART;
-                       else {
-                               ti->error = "Invalid feature arguments";
-                               r = -EINVAL;
-                               goto bad;
-                       }
-               }
-       }
-
-       v->hash_per_block_bits =
-               __fls((1 << v->hash_dev_block_bits) / v->digest_size);
-
-       v->levels = 0;
-       if (v->data_blocks)
-               while (v->hash_per_block_bits * v->levels < 64 &&
-                      (unsigned long long)(v->data_blocks - 1) >>
-                      (v->hash_per_block_bits * v->levels))
-                       v->levels++;
-
-       if (v->levels > DM_VERITY_MAX_LEVELS) {
-               ti->error = "Too many tree levels";
-               r = -E2BIG;
-               goto bad;
-       }
-
-       hash_position = v->hash_start;
-       for (i = v->levels - 1; i >= 0; i--) {
-               sector_t s;
-               v->hash_level_block[i] = hash_position;
-               s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
-                                       >> ((i + 1) * v->hash_per_block_bits);
-               if (hash_position + s < hash_position) {
-                       ti->error = "Hash device offset overflow";
-                       r = -E2BIG;
-                       goto bad;
-               }
-               hash_position += s;
-       }
-       v->hash_blocks = hash_position;
-
-       v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
-               1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
-               dm_bufio_alloc_callback, NULL);
-       if (IS_ERR(v->bufio)) {
-               ti->error = "Cannot initialize dm-bufio";
-               r = PTR_ERR(v->bufio);
-               v->bufio = NULL;
-               goto bad;
-       }
-
-       if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
-               ti->error = "Hash device is too small";
-               r = -E2BIG;
-               goto bad;
-       }
-
-       ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
-
-       /* WQ_UNBOUND greatly improves performance when running on ramdisk */
-       v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
-       if (!v->verify_wq) {
-               ti->error = "Cannot allocate workqueue";
-               r = -ENOMEM;
-               goto bad;
-       }
-
-       return 0;
-
-bad:
-       verity_dtr(ti);
-
-       return r;
-}
-
-static struct target_type verity_target = {
-       .name           = "verity",
-       .version        = {1, 2, 0},
-       .module         = THIS_MODULE,
-       .ctr            = verity_ctr,
-       .dtr            = verity_dtr,
-       .map            = verity_map,
-       .status         = verity_status,
-       .prepare_ioctl  = verity_prepare_ioctl,
-       .iterate_devices = verity_iterate_devices,
-       .io_hints       = verity_io_hints,
-};
-
-static int __init dm_verity_init(void)
-{
-       int r;
-
-       r = dm_register_target(&verity_target);
-       if (r < 0)
-               DMERR("register failed %d", r);
-
-       return r;
-}
-
-static void __exit dm_verity_exit(void)
-{
-       dm_unregister_target(&verity_target);
-}
-
-module_init(dm_verity_init);
-module_exit(dm_verity_exit);
-
-MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
-MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
-MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
-MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
-MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
new file mode 100644 (file)
index 0000000..75effca
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * Author: Mikulas Patocka <mpatocka@redhat.com>
+ *
+ * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef DM_VERITY_H
+#define DM_VERITY_H
+
+#include "dm-bufio.h"
+#include <linux/device-mapper.h>
+#include <crypto/hash.h>
+
+#define DM_VERITY_MAX_LEVELS           63
+
+enum verity_mode {
+       DM_VERITY_MODE_EIO,
+       DM_VERITY_MODE_LOGGING,
+       DM_VERITY_MODE_RESTART
+};
+
+enum verity_block_type {
+       DM_VERITY_BLOCK_TYPE_DATA,
+       DM_VERITY_BLOCK_TYPE_METADATA
+};
+
+struct dm_verity_fec;
+
+struct dm_verity {
+       struct dm_dev *data_dev;
+       struct dm_dev *hash_dev;
+       struct dm_target *ti;
+       struct dm_bufio_client *bufio;
+       char *alg_name;
+       struct crypto_shash *tfm;
+       u8 *root_digest;        /* digest of the root block */
+       u8 *salt;               /* salt: its size is salt_size */
+       u8 *zero_digest;        /* digest for a zero block */
+       unsigned salt_size;
+       sector_t data_start;    /* data offset in 512-byte sectors */
+       sector_t hash_start;    /* hash start in blocks */
+       sector_t data_blocks;   /* the number of data blocks */
+       sector_t hash_blocks;   /* the number of hash blocks */
+       unsigned char data_dev_block_bits;      /* log2(data blocksize) */
+       unsigned char hash_dev_block_bits;      /* log2(hash blocksize) */
+       unsigned char hash_per_block_bits;      /* log2(hashes in hash block) */
+       unsigned char levels;   /* the number of tree levels */
+       unsigned char version;
+       unsigned digest_size;   /* digest size for the current hash algorithm */
+       unsigned shash_descsize;/* the size of temporary space for crypto */
+       int hash_failed;        /* set to 1 if hash of any block failed */
+       enum verity_mode mode;  /* mode for handling verification errors */
+       unsigned corrupted_errs;/* Number of errors for corrupted blocks */
+
+       struct workqueue_struct *verify_wq;
+
+       /* starting blocks for each tree level. 0 is the lowest level. */
+       sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
+
+       struct dm_verity_fec *fec;      /* forward error correction */
+};
+
+struct dm_verity_io {
+       struct dm_verity *v;
+
+       /* original value of bio->bi_end_io */
+       bio_end_io_t *orig_bi_end_io;
+
+       sector_t block;
+       unsigned n_blocks;
+
+       struct bvec_iter iter;
+
+       struct work_struct work;
+
+       /*
+        * Three variably-sized fields follow this struct:
+        *
+        * u8 hash_desc[v->shash_descsize];
+        * u8 real_digest[v->digest_size];
+        * u8 want_digest[v->digest_size];
+        *
+        * To access them use: verity_io_hash_desc(), verity_io_real_digest()
+        * and verity_io_want_digest().
+        */
+};
+
+static inline struct shash_desc *verity_io_hash_desc(struct dm_verity *v,
+                                                    struct dm_verity_io *io)
+{
+       return (struct shash_desc *)(io + 1);
+}
+
+static inline u8 *verity_io_real_digest(struct dm_verity *v,
+                                       struct dm_verity_io *io)
+{
+       return (u8 *)(io + 1) + v->shash_descsize;
+}
+
+static inline u8 *verity_io_want_digest(struct dm_verity *v,
+                                       struct dm_verity_io *io)
+{
+       return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
+}
+
+static inline u8 *verity_io_digest_end(struct dm_verity *v,
+                                      struct dm_verity_io *io)
+{
+       return verity_io_want_digest(v, io) + v->digest_size;
+}
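These accessors assume the constructor reserved the trailing space; a sketch of the minimum per-bio allocation they rely on, mirroring what verity_ctr() sets up:

/*
 * Layout assumed by the helpers above:
 *
 *	ti->per_bio_data_size >= sizeof(struct dm_verity_io) +
 *				 v->shash_descsize + 2 * v->digest_size;
 *
 * i.e. the shash_desc, the computed digest and the wanted digest are packed
 * directly behind struct dm_verity_io in the per-bio data.
 */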
+
+extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+                              struct bvec_iter *iter,
+                              int (*process)(struct dm_verity *v,
+                                             struct dm_verity_io *io,
+                                             u8 *data, size_t len));
+
+extern int verity_hash(struct dm_verity *v, struct shash_desc *desc,
+                      const u8 *data, size_t len, u8 *digest);
+
+extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
+                                sector_t block, u8 *digest, bool *is_zero);
+
+extern void verity_status(struct dm_target *ti, status_type_t type,
+                       unsigned status_flags, char *result, unsigned maxlen);
+extern int verity_prepare_ioctl(struct dm_target *ti,
+                struct block_device **bdev, fmode_t *mode);
+extern int verity_iterate_devices(struct dm_target *ti,
+                               iterate_devices_callout_fn fn, void *data);
+extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
+extern void verity_dtr(struct dm_target *ti);
+extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+extern int verity_map(struct dm_target *ti, struct bio *bio);
+#endif /* DM_VERITY_H */
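With the verity entry points now declared extern, other in-kernel code can be layered on top of them; a minimal sketch of a hypothetical wrapper target (the name and the idea of reusing the hooks unchanged are illustrative assumptions, not something added by this patch):

/* Hypothetical example only. */
static struct target_type verity_wrapper_target = {
	.name            = "verity-wrapper",	/* made-up name */
	.version         = {1, 0, 0},
	.module          = THIS_MODULE,
	.ctr             = verity_ctr,		/* exported verity hooks */
	.dtr             = verity_dtr,
	.map             = verity_map,
	.status          = verity_status,
	.prepare_ioctl   = verity_prepare_ioctl,
	.iterate_devices = verity_iterate_devices,
	.io_hints        = verity_io_hints,
};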
index 4e941f00b6008f286b839d8e2f72c41978cc98e5..317ef63ee78999d673f85b62af14bed7e664549e 100644 (file)
@@ -1403,11 +1403,14 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
         * in order to avoid troubles during device release.
         */
        kfree(priv->ctrl.fname);
+       priv->ctrl.fname = NULL;
        memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
        if (p->fname) {
                priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
-               if (priv->ctrl.fname == NULL)
+               if (priv->ctrl.fname == NULL) {
                        rc = -ENOMEM;
+                       goto unlock;
+               }
        }
 
        /*
@@ -1439,6 +1442,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
                } else
                        priv->state = XC2028_WAITING_FIRMWARE;
        }
+unlock:
        mutex_unlock(&priv->lock);
 
        return rc;
index 4bf7d50b1bc7cc8ccb55171d9ca76ac3bd6fe114..06eddc0cb24fdb8fb226f952413ab749d92ff5ae 100644 (file)
@@ -525,6 +525,18 @@ config VEXPRESS_SYSCFG
          bus. System Configuration interface is one of the possible means
          of generating transactions on this bus.
 
+config UID_CPUTIME
+       bool "Per-UID cpu time statistics"
+       depends on PROFILING
+       help
+         Per UID based cpu time statistics exported to /proc/uid_cputime
+
+config MEMORY_STATE_TIME
+       tristate "Memory freq/bandwidth time statistics"
+       depends on PROFILING
+       help
+         Memory time statistics exported to /sys/kernel/memory_state_time
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
index 537d7f3b78da9a35afcf4fe41053bbcbef1dd1f2..b76b4c9fe10427a1096b3ddd14033cbceebe6213 100644 (file)
@@ -56,3 +56,5 @@ obj-$(CONFIG_GENWQE)          += genwqe/
 obj-$(CONFIG_ECHO)             += echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)  += vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)         += cxl/
+obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
+obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
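For these objects to actually be built, the corresponding options have to be enabled in the kernel configuration; a minimal fragment might look like this (assuming PROFILING is already selected elsewhere in the config):

CONFIG_PROFILING=y
CONFIG_UID_CPUTIME=y
CONFIG_MEMORY_STATE_TIME=y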
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
new file mode 100644 (file)
index 0000000..34c797a
--- /dev/null
@@ -0,0 +1,454 @@
+/* drivers/misc/memory_state_time.c
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/memory-state-time.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/time.h>
+#include <linux/timekeeping.h>
+#include <linux/workqueue.h>
+
+#define KERNEL_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define KERNEL_ATTR_RW(_name) \
+static struct kobj_attribute _name##_attr = \
+       __ATTR(_name, 0644, _name##_show, _name##_store)
+
+#define FREQ_HASH_BITS 4
+DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);
+
+static DEFINE_MUTEX(mem_lock);
+
+#define TAG "memory_state_time"
+#define BW_NODE "/soc/memory-state-time"
+#define FREQ_TBL "freq-tbl"
+#define BW_TBL "bw-buckets"
+#define NUM_SOURCES "num-sources"
+
+#define LOWEST_FREQ 2
+
+static int curr_bw;
+static int curr_freq;
+static u32 *bw_buckets;
+static u32 *freq_buckets;
+static int num_freqs;
+static int num_buckets;
+static int registered_bw_sources;
+static u64 last_update;
+static bool init_success;
+static struct workqueue_struct *memory_wq;
+static u32 num_sources = 10;
+static int *bandwidths;
+
+struct freq_entry {
+       int freq;
+       u64 *buckets; /* Bandwidth buckets. */
+       struct hlist_node hash;
+};
+
+struct queue_container {
+       struct work_struct update_state;
+       int value;
+       u64 time_now;
+       int id;
+       struct mutex *lock;
+};
+
+static int find_bucket(int bw)
+{
+       int i;
+
+       if (bw_buckets != NULL) {
+               for (i = 0; i < num_buckets; i++) {
+                       if (bw_buckets[i] > bw) {
+                               pr_debug("Found bucket %d for bandwidth %d\n",
+                                       i, bw);
+                               return i;
+                       }
+               }
+               return num_buckets - 1;
+       }
+       return 0;
+}
+
+static u64 get_time_diff(u64 time_now)
+{
+       u64 ms;
+
+       ms = time_now - last_update;
+       last_update = time_now;
+       return ms;
+}
+
+static ssize_t show_stat_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       int i, j;
+       int len = 0;
+       struct freq_entry *freq_entry;
+
+       for (i = 0; i < num_freqs; i++) {
+               hash_for_each_possible(freq_hash_table, freq_entry, hash,
+                               freq_buckets[i]) {
+                       if (freq_entry->freq == freq_buckets[i]) {
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
+                                               "%d ", freq_buckets[i]);
+                               if (len >= PAGE_SIZE)
+                                       break;
+                               for (j = 0; j < num_buckets; j++) {
+                                       len += scnprintf(buf + len,
+                                                       PAGE_SIZE - len,
+                                                       "%llu ",
+                                                       freq_entry->buckets[j]);
+                               }
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
+                                               "\n");
+                       }
+               }
+       }
+       pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
+       return len;
+}
+KERNEL_ATTR_RO(show_stat);
+
+static void update_table(u64 time_now)
+{
+       struct freq_entry *freq_entry;
+
+       pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
+       hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
+               if (curr_freq == freq_entry->freq) {
+                       freq_entry->buckets[find_bucket(curr_bw)]
+                                       += get_time_diff(time_now);
+                       break;
+               }
+       }
+}
+
+static bool freq_exists(int freq)
+{
+       int i;
+
+       for (i = 0; i < num_freqs; i++) {
+               if (freq == freq_buckets[i])
+                       return true;
+       }
+       return false;
+}
+
+static int calculate_total_bw(int bw, int index)
+{
+       int i;
+       int total_bw = 0;
+
+       pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
+       bandwidths[index] = bw;
+       for (i = 0; i < registered_bw_sources; i++)
+               total_bw += bandwidths[i];
+       return total_bw;
+}
+
+static void freq_update_do_work(struct work_struct *work)
+{
+       struct queue_container *freq_state_update
+                       = container_of(work, struct queue_container,
+                       update_state);
+       if (freq_state_update) {
+               mutex_lock(&mem_lock);
+               update_table(freq_state_update->time_now);
+               curr_freq = freq_state_update->value;
+               mutex_unlock(&mem_lock);
+               kfree(freq_state_update);
+       }
+}
+
+static void bw_update_do_work(struct work_struct *work)
+{
+       struct queue_container *bw_state_update
+                       = container_of(work, struct queue_container,
+                       update_state);
+       if (bw_state_update) {
+               mutex_lock(&mem_lock);
+               update_table(bw_state_update->time_now);
+               curr_bw = calculate_total_bw(bw_state_update->value,
+                               bw_state_update->id);
+               mutex_unlock(&mem_lock);
+               kfree(bw_state_update);
+       }
+}
+
+static void memory_state_freq_update(struct memory_state_update_block *ub,
+               int value)
+{
+       if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+               if (freq_exists(value) && init_success) {
+                       struct queue_container *freq_container
+                               = kmalloc(sizeof(struct queue_container),
+                               GFP_KERNEL);
+                       if (!freq_container)
+                               return;
+                       INIT_WORK(&freq_container->update_state,
+                                       freq_update_do_work);
+                       freq_container->time_now = ktime_get_boot_ns();
+                       freq_container->value = value;
+                       pr_debug("Scheduling freq update in work queue\n");
+                       queue_work(memory_wq, &freq_container->update_state);
+               } else {
+                       pr_debug("Freq does not exist.\n");
+               }
+       }
+}
+
+static void memory_state_bw_update(struct memory_state_update_block *ub,
+               int value)
+{
+       if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+               if (init_success) {
+                       struct queue_container *bw_container
+                               = kmalloc(sizeof(struct queue_container),
+                               GFP_KERNEL);
+                       if (!bw_container)
+                               return;
+                       INIT_WORK(&bw_container->update_state,
+                                       bw_update_do_work);
+                       bw_container->time_now = ktime_get_boot_ns();
+                       bw_container->value = value;
+                       bw_container->id = ub->id;
+                       pr_debug("Scheduling bandwidth update in work queue\n");
+                       queue_work(memory_wq, &bw_container->update_state);
+               }
+       }
+}
+
+struct memory_state_update_block *memory_state_register_frequency_source(void)
+{
+       struct memory_state_update_block *block;
+
+       if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+               pr_debug("Allocating frequency source\n");
+               block = kmalloc(sizeof(struct memory_state_update_block),
+                                       GFP_KERNEL);
+               if (!block)
+                       return NULL;
+               block->update_call = memory_state_freq_update;
+               return block;
+       }
+       pr_err("Config option disabled.\n");
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
+
+struct memory_state_update_block *memory_state_register_bandwidth_source(void)
+{
+       struct memory_state_update_block *block;
+
+       if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+               pr_debug("Allocating bandwidth source %d\n",
+                               registered_bw_sources);
+               block = kmalloc(sizeof(struct memory_state_update_block),
+                                       GFP_KERNEL);
+               if (!block)
+                       return NULL;
+               block->update_call = memory_state_bw_update;
+               if (registered_bw_sources < num_sources) {
+                       block->id = registered_bw_sources++;
+               } else {
+                       pr_err("Unable to allocate source; max number reached\n");
+                       kfree(block);
+                       return NULL;
+               }
+               return block;
+       }
+       pr_err("Config option disabled.\n");
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
+
+/* Buckets are designated by their maximum bandwidth.
+ * Reads the bandwidth buckets supported by the device from the device tree.
+ * Returns 0 on success or a negative errno.
+ */
+static int get_bw_buckets(struct device *dev)
+{
+       int ret, lenb;
+       struct device_node *node = dev->of_node;
+
+       of_property_read_u32(node, NUM_SOURCES, &num_sources);
+       if (of_find_property(node, BW_TBL, &lenb)) {
+               bandwidths = devm_kzalloc(dev,
+                               sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+               if (!bandwidths)
+                       return -ENOMEM;
+               lenb /= sizeof(*bw_buckets);
+               bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+                               GFP_KERNEL);
+               if (!bw_buckets) {
+                       devm_kfree(dev, bandwidths);
+                       return -ENOMEM;
+               }
+               ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+                               lenb);
+               if (ret < 0) {
+                       devm_kfree(dev, bandwidths);
+                       devm_kfree(dev, bw_buckets);
+                       pr_err("Unable to read bandwidth table from device tree.\n");
+                       return ret;
+               }
+       }
+       curr_bw = 0;
+       num_buckets = lenb;
+       return 0;
+}
+
+/* Adds a struct freq_entry node to the hashtable for each supported frequency.
+ * Returns 0 on success or a negative errno.
+ */
+static int freq_buckets_init(struct device *dev)
+{
+       struct freq_entry *freq_entry;
+       int i;
+       int ret, lenf;
+       struct device_node *node = dev->of_node;
+
+       if (of_find_property(node, FREQ_TBL, &lenf)) {
+               lenf /= sizeof(*freq_buckets);
+               freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+                               GFP_KERNEL);
+               if (!freq_buckets)
+                       return -ENOMEM;
+               pr_debug("freqs found len %d\n", lenf);
+               ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+                               lenf);
+               if (ret < 0) {
+                       devm_kfree(dev, freq_buckets);
+                       pr_err("Unable to read frequency table from device tree.\n");
+                       return ret;
+               }
+               pr_debug("ret freq %d\n", ret);
+       }
+       num_freqs = lenf;
+       curr_freq = freq_buckets[LOWEST_FREQ];
+
+       for (i = 0; i < num_freqs; i++) {
+               freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
+                               GFP_KERNEL);
+               if (!freq_entry)
+                       return -ENOMEM;
+               freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
+                               GFP_KERNEL);
+               if (!freq_entry->buckets) {
+                       devm_kfree(dev, freq_entry);
+                       return -ENOMEM;
+               }
+               pr_debug("memory_state_time Adding freq to ht %d\n",
+                               freq_buckets[i]);
+               freq_entry->freq = freq_buckets[i];
+               hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
+       }
+       return 0;
+}
+
+struct kobject *memory_kobj;
+EXPORT_SYMBOL_GPL(memory_kobj);
+
+static struct attribute *memory_attrs[] = {
+       &show_stat_attr.attr,
+       NULL
+};
+
+static struct attribute_group memory_attr_group = {
+       .attrs = memory_attrs,
+};
+
+static int memory_state_time_probe(struct platform_device *pdev)
+{
+       int error;
+
+       error = get_bw_buckets(&pdev->dev);
+       if (error)
+               return error;
+       error = freq_buckets_init(&pdev->dev);
+       if (error)
+               return error;
+       last_update = ktime_get_boot_ns();
+       init_success = true;
+
+       pr_debug("memory_state_time initialized with num_freqs %d\n",
+                       num_freqs);
+       return 0;
+}
+
+static const struct of_device_id match_table[] = {
+       { .compatible = "memory-state-time" },
+       {}
+};
+
+static struct platform_driver memory_state_time_driver = {
+       .probe = memory_state_time_probe,
+       .driver = {
+               .name = "memory-state-time",
+               .of_match_table = match_table,
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init memory_state_time_init(void)
+{
+       int error;
+
+       hash_init(freq_hash_table);
+       memory_wq = create_singlethread_workqueue("memory_wq");
+       if (!memory_wq) {
+               pr_err("Unable to create workqueue.\n");
+               return -EINVAL;
+       }
+       /*
+        * Create sys/kernel directory for memory_state_time.
+        */
+       memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
+       if (!memory_kobj) {
+               pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
+               error = -ENOMEM;
+               goto wq;
+       }
+       error = sysfs_create_group(memory_kobj, &memory_attr_group);
+       if (error) {
+               pr_err("Unable to create sysfs folder.\n");
+               goto kobj;
+       }
+
+       error = platform_driver_register(&memory_state_time_driver);
+       if (error) {
+               pr_err("Unable to register memory_state_time platform driver.\n");
+               goto group;
+       }
+       return 0;
+
+group: sysfs_remove_group(memory_kobj, &memory_attr_group);
+kobj:  kobject_put(memory_kobj);
+wq:    destroy_workqueue(memory_wq);
+       return error;
+}
+module_init(memory_state_time_init);
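As a hedged sketch of how the API exported above is meant to be consumed (the memory_state_update_block layout is only inferred from its use in this file, and every identifier below other than the registration call is illustrative), a bus-scaling or devfreq driver would register itself once and invoke the block's update_call whenever its bandwidth vote changes:

	/* Hypothetical consumer of the memory_state_time bandwidth API. */
	#include <linux/memory-state-time.h>

	static struct memory_state_update_block *example_bw_block;

	static int example_register_bw_source(void)
	{
		example_bw_block = memory_state_register_bandwidth_source();
		if (!example_bw_block)
			return -ENOMEM;
		return 0;
	}

	static void example_bw_vote_changed(int new_bw)
	{
		/* update_call() defers the bookkeeping to memory_wq, so the
		 * caller only pays for a small GFP_KERNEL allocation here. */
		if (example_bw_block)
			example_bw_block->update_call(example_bw_block, new_bw);
	}

Frequency sources work the same way via memory_state_register_frequency_source(), and the accumulated per-frequency, per-bucket residency is read back from /sys/kernel/memory_state_time/show_stat.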
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
new file mode 100644 (file)
index 0000000..c1ad524
--- /dev/null
@@ -0,0 +1,240 @@
+/* drivers/misc/uid_cputime.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define UID_HASH_BITS  10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_MUTEX(uid_lock);
+static struct proc_dir_entry *parent;
+
+struct uid_entry {
+       uid_t uid;
+       cputime_t utime;
+       cputime_t stime;
+       cputime_t active_utime;
+       cputime_t active_stime;
+       struct hlist_node hash;
+};
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+       struct uid_entry *uid_entry;
+       hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+               if (uid_entry->uid == uid)
+                       return uid_entry;
+       }
+       return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+       struct uid_entry *uid_entry;
+
+       uid_entry = find_uid_entry(uid);
+       if (uid_entry)
+               return uid_entry;
+
+       uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+       if (!uid_entry)
+               return NULL;
+
+       uid_entry->uid = uid;
+
+       hash_add(hash_table, &uid_entry->hash, uid);
+
+       return uid_entry;
+}
+
+static int uid_stat_show(struct seq_file *m, void *v)
+{
+       struct uid_entry *uid_entry;
+       struct task_struct *task, *temp;
+       cputime_t utime;
+       cputime_t stime;
+       unsigned long bkt;
+
+       mutex_lock(&uid_lock);
+
+       hash_for_each(hash_table, bkt, uid_entry, hash) {
+               uid_entry->active_stime = 0;
+               uid_entry->active_utime = 0;
+       }
+
+       read_lock(&tasklist_lock);
+       do_each_thread(temp, task) {
+               uid_entry = find_or_register_uid(from_kuid_munged(
+                       current_user_ns(), task_uid(task)));
+               if (!uid_entry) {
+                       read_unlock(&tasklist_lock);
+                       mutex_unlock(&uid_lock);
+                       pr_err("%s: failed to find the uid_entry for uid %d\n",
+                               __func__, from_kuid_munged(current_user_ns(),
+                               task_uid(task)));
+                       return -ENOMEM;
+               }
+               task_cputime_adjusted(task, &utime, &stime);
+               uid_entry->active_utime += utime;
+               uid_entry->active_stime += stime;
+       } while_each_thread(temp, task);
+       read_unlock(&tasklist_lock);
+
+       hash_for_each(hash_table, bkt, uid_entry, hash) {
+               cputime_t total_utime = uid_entry->utime +
+                                                       uid_entry->active_utime;
+               cputime_t total_stime = uid_entry->stime +
+                                                       uid_entry->active_stime;
+               seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+                       (unsigned long long)jiffies_to_msecs(
+                               cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
+                       (unsigned long long)jiffies_to_msecs(
+                               cputime_to_jiffies(total_stime)) * USEC_PER_MSEC);
+       }
+
+       mutex_unlock(&uid_lock);
+       return 0;
+}
+
+static int uid_stat_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uid_stat_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_fops = {
+       .open           = uid_stat_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+                       const char __user *buffer, size_t count, loff_t *ppos)
+{
+       struct uid_entry *uid_entry;
+       struct hlist_node *tmp;
+       char uids[128];
+       char *start_uid, *end_uid = NULL;
+       long int uid_start = 0, uid_end = 0;
+
+       if (count >= sizeof(uids))
+               count = sizeof(uids) - 1;
+
+       if (copy_from_user(uids, buffer, count))
+               return -EFAULT;
+
+       uids[count] = '\0';
+       end_uid = uids;
+       start_uid = strsep(&end_uid, "-");
+
+       if (!start_uid || !end_uid)
+               return -EINVAL;
+
+       if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+               kstrtol(end_uid, 10, &uid_end) != 0) {
+               return -EINVAL;
+       }
+       mutex_lock(&uid_lock);
+
+       for (; uid_start <= uid_end; uid_start++) {
+               hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+                                                       hash, (uid_t)uid_start) {
+                       if (uid_start == uid_entry->uid) {
+                               hash_del(&uid_entry->hash);
+                               kfree(uid_entry);
+                       }
+               }
+       }
+
+       mutex_unlock(&uid_lock);
+       return count;
+}
+
+static const struct file_operations uid_remove_fops = {
+       .open           = uid_remove_open,
+       .release        = single_release,
+       .write          = uid_remove_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+                       unsigned long cmd, void *v)
+{
+       struct task_struct *task = v;
+       struct uid_entry *uid_entry;
+       cputime_t utime, stime;
+       uid_t uid;
+
+       if (!task)
+               return NOTIFY_OK;
+
+       mutex_lock(&uid_lock);
+       uid = from_kuid_munged(current_user_ns(), task_uid(task));
+       uid_entry = find_or_register_uid(uid);
+       if (!uid_entry) {
+               pr_err("%s: failed to find uid %d\n", __func__, uid);
+               goto exit;
+       }
+
+       task_cputime_adjusted(task, &utime, &stime);
+       uid_entry->utime += utime;
+       uid_entry->stime += stime;
+
+exit:
+       mutex_unlock(&uid_lock);
+       return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+       .notifier_call  = process_notifier,
+};
+
+static int __init proc_uid_cputime_init(void)
+{
+       hash_init(hash_table);
+
+       parent = proc_mkdir("uid_cputime", NULL);
+       if (!parent) {
+               pr_err("%s: failed to create proc entry\n", __func__);
+               return -ENOMEM;
+       }
+
+       proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
+                                       NULL);
+
+       proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
+                                       NULL);
+
+       profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+       return 0;
+}
+
+early_initcall(proc_uid_cputime_init);
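A minimal user-space sketch of the interface created above: show_uid_stat prints one "uid: utime stime" line per UID (both values in microseconds), and remove_uid_range accepts a "start-end" string, matching the strsep()/kstrtol() parsing in uid_remove_write(). The UID range used here is only an example.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Drop accumulated stats for an example range of app UIDs. */
		const char *range = "10000-10010";
		int fd = open("/proc/uid_cputime/remove_uid_range", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, range, strlen(range)) < 0)
			perror("write");
		close(fd);
		return 0;
	}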
index 5562308699bc292d5dce1e63fdea2452c38164f5..6142ec1b9dfbbc2a8a368977d2f3e40e022556c7 100644 (file)
@@ -68,3 +68,15 @@ config MMC_TEST
 
          This driver is only of interest to those developing or
          testing a host driver. Most people should say N here.
+
+config MMC_SIMULATE_MAX_SPEED
+       bool "Turn on maximum speed control per block device"
+       depends on MMC_BLOCK
+       help
+         Say Y here to enable MMC device speed limiting. Used to test and
+         simulate the behavior of the system when confronted with a slow MMC.
+
+         Enables the max_read_speed, max_write_speed and cache_size attributes,
+         which cap the maximum read/write throughput in KB/s and set the size
+         of the simulated write cache in MB.
+
+         If unsure, say N here.
index f2b733275a0a4a22b6ef0cd4b386237d0933c599..8d169d5a8b017d52a3339489cb751365dca54d05 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/compat.h>
 #include <linux/pm_runtime.h>
 
+#include <trace/events/mmc.h>
+
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include "queue.h"
 
 MODULE_ALIAS("mmc:block");
-
-#ifdef KERNEL
 #ifdef MODULE_PARAM_PREFIX
 #undef MODULE_PARAM_PREFIX
 #endif
 #define MODULE_PARAM_PREFIX "mmcblk."
-#endif
 
 #define INAND_CMD38_ARG_EXT_CSD  113
 #define INAND_CMD38_ARG_ERASE    0x00
@@ -171,11 +170,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 
 static inline int mmc_get_devidx(struct gendisk *disk)
 {
-       int devmaj = MAJOR(disk_devt(disk));
-       int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
-       if (!devmaj)
-               devidx = disk->first_minor / perdev_minors;
+       int devidx = disk->first_minor / perdev_minors;
        return devidx;
 }
 
@@ -293,6 +288,250 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+
+static int max_read_speed, max_write_speed, cache_size = 4;
+
+module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
+module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
+module_param(cache_size, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
+
+/*
+ * helper macros and expectations:
+ *  size    - unsigned long number of bytes
+ *  jiffies - unsigned long HZ timestamp difference
+ *  speed   - unsigned KB/s transfer rate
+ */
+#define size_and_speed_to_jiffies(size, speed) \
+               ((size) * HZ / (speed) / 1024UL)
+#define jiffies_and_speed_to_size(jiffies, speed) \
+               (((speed) * (jiffies) * 1024UL) / HZ)
+#define jiffies_and_size_to_speed(jiffies, size) \
+               ((size) * HZ / (jiffies) / 1024UL)
+
+/* Limits to report warning */
+/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
+#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
+#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
+
+#define speed_valid(speed) ((speed) > 0)
+
+static const char off[] = "off\n";
+
+static int max_speed_show(int speed, char *buf)
+{
+       if (speed)
+               return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
+       else
+               return scnprintf(buf, PAGE_SIZE, off);
+}
+
+static int max_speed_store(const char *buf, struct request_queue *q)
+{
+       unsigned int limit, set = 0;
+
+       if (!strncasecmp(off, buf, sizeof(off) - 2))
+               return set;
+       if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
+               return -EINVAL;
+       if (set == 0)
+               return set;
+       limit = MAX_SPEED(q);
+       if (set > limit)
+               pr_warn("max speed %u ineffective above %u\n", set, limit);
+       limit = MIN_SPEED(q);
+       if (set < limit)
+               pr_warn("max speed %u painful below %u\n", set, limit);
+       return set;
+}
+
+static ssize_t max_write_speed_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
+
+       mmc_blk_put(md);
+       return ret;
+}
+
+static ssize_t max_write_speed_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       int set = max_speed_store(buf, md->queue.queue);
+
+       if (set < 0) {
+               mmc_blk_put(md);
+               return set;
+       }
+
+       atomic_set(&md->queue.max_write_speed, set);
+       mmc_blk_put(md);
+       return count;
+}
+
+static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
+       max_write_speed_show, max_write_speed_store);
+
+static ssize_t max_read_speed_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
+
+       mmc_blk_put(md);
+       return ret;
+}
+
+static ssize_t max_read_speed_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       int set = max_speed_store(buf, md->queue.queue);
+
+       if (set < 0) {
+               mmc_blk_put(md);
+               return set;
+       }
+
+       atomic_set(&md->queue.max_read_speed, set);
+       mmc_blk_put(md);
+       return count;
+}
+
+static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
+       max_read_speed_show, max_read_speed_store);
+
+static ssize_t cache_size_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+       struct mmc_queue *mq = &md->queue;
+       int cache_size = atomic_read(&mq->cache_size);
+       int ret;
+
+       if (!cache_size)
+               ret = scnprintf(buf, PAGE_SIZE, off);
+       else {
+               int speed = atomic_read(&mq->max_write_speed);
+
+               if (!speed_valid(speed))
+                       ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
+               else { /* We accept race between cache_jiffies and cache_used */
+                       unsigned long size = jiffies_and_speed_to_size(
+                               jiffies - mq->cache_jiffies, speed);
+                       long used = atomic_long_read(&mq->cache_used);
+
+                       if (size >= used)
+                               size = 0;
+                       else
+                               size = (used - size) * 100 / cache_size
+                                       / 1024UL / 1024UL;
+
+                       ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
+                               cache_size, size);
+               }
+       }
+
+       mmc_blk_put(md);
+       return ret;
+}
+
+static ssize_t cache_size_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct mmc_blk_data *md;
+       unsigned int set = 0;
+
+       if (strncasecmp(off, buf, sizeof(off) - 2)
+        && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
+               return -EINVAL;
+
+       md = mmc_blk_get(dev_to_disk(dev));
+       atomic_set(&md->queue.cache_size, set);
+       mmc_blk_put(md);
+       return count;
+}
+
+static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
+       cache_size_show, cache_size_store);
+
+/* correct for write-back */
+static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
+{
+       long used = 0;
+       int speed = atomic_read(&mq->max_write_speed);
+
+       if (speed_valid(speed)) {
+               unsigned long size = jiffies_and_speed_to_size(
+                                       waitfor - mq->cache_jiffies, speed);
+               used = atomic_long_read(&mq->cache_used);
+
+               if (size >= used)
+                       used = 0;
+               else
+                       used -= size;
+       }
+
+       atomic_long_set(&mq->cache_used, used);
+       mq->cache_jiffies = waitfor;
+
+       return used;
+}
+
+static void mmc_blk_simulate_delay(
+       struct mmc_queue *mq,
+       struct request *req,
+       unsigned long waitfor)
+{
+       int max_speed;
+
+       if (!req)
+               return;
+
+       max_speed = (rq_data_dir(req) == READ)
+               ? atomic_read(&mq->max_read_speed)
+               : atomic_read(&mq->max_write_speed);
+       if (speed_valid(max_speed)) {
+               unsigned long bytes = blk_rq_bytes(req);
+
+               if (rq_data_dir(req) != READ) {
+                       int cache_size = atomic_read(&mq->cache_size);
+
+                       if (cache_size) {
+                               unsigned long size = cache_size * 1024L * 1024L;
+                               long used = mmc_blk_cache_used(mq, waitfor);
+
+                               used += bytes;
+                               atomic_long_set(&mq->cache_used, used);
+                               bytes = 0;
+                               if (used > size)
+                                       bytes = used - size;
+                       }
+               }
+               waitfor += size_and_speed_to_jiffies(bytes, max_speed);
+               if (time_is_after_jiffies(waitfor)) {
+                       long msecs = jiffies_to_msecs(waitfor - jiffies);
+
+                       if (likely(msecs > 0))
+                               msleep(msecs);
+               }
+       }
+}
+
+#else
+
+#define mmc_blk_simulate_delay(mq, req, waitfor)
+
+#endif
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -450,9 +689,11 @@ static int ioctl_do_sanitize(struct mmc_card *card)
        pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
                mmc_hostname(card->host), __func__);
 
+       trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                        EXT_CSD_SANITIZE_START, 1,
                                        MMC_SANITIZE_REQ_TIMEOUT);
+       trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
 
        if (err)
                pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
@@ -952,18 +1193,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
                        req->rq_disk->disk_name, "timed out", name, status);
 
                /* If the status cmd initially failed, retry the r/w cmd */
-               if (!status_valid)
+               if (!status_valid) {
+                       pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
-
+               }
                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
-               if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+               if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+                       pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
+               }
 
                /* Otherwise abort the command */
+               pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
                return ERR_ABORT;
 
        default:
@@ -1263,6 +1508,23 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
        if (ret)
                ret = -EIO;
 
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+       else if (atomic_read(&mq->cache_size)) {
+               long used = mmc_blk_cache_used(mq, jiffies);
+
+               if (used) {
+                       int speed = atomic_read(&mq->max_write_speed);
+
+                       if (speed_valid(speed)) {
+                               unsigned long msecs = jiffies_to_msecs(
+                                       size_and_speed_to_jiffies(
+                                               used, speed));
+                               if (msecs)
+                                       msleep(msecs);
+                       }
+               }
+       }
+#endif
        blk_end_request_all(req, ret);
 
        return ret ? 0 : 1;
@@ -1942,6 +2204,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
        struct mmc_async_req *areq;
        const u8 packed_nr = 2;
        u8 reqs = 0;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+       unsigned long waitfor = jiffies;
+#endif
 
        if (!rqc && !mq->mqrq_prev->req)
                return 0;
@@ -1992,6 +2257,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
                         */
                        mmc_blk_reset_success(md, type);
 
+                       mmc_blk_simulate_delay(mq, rqc, waitfor);
+
                        if (mmc_packed_cmd(mq_rq->cmd_type)) {
                                ret = mmc_blk_end_packed_req(mq_rq);
                                break;
@@ -2252,6 +2519,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
+       md->disk->flags = GENHD_FL_EXT_DEVT;
        if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
 
@@ -2410,6 +2678,14 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
                                        card->ext_csd.boot_ro_lockable)
                                device_remove_file(disk_to_dev(md->disk),
                                        &md->power_ro_lock);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+                       device_remove_file(disk_to_dev(md->disk),
+                                               &dev_attr_max_write_speed);
+                       device_remove_file(disk_to_dev(md->disk),
+                                               &dev_attr_max_read_speed);
+                       device_remove_file(disk_to_dev(md->disk),
+                                               &dev_attr_cache_size);
+#endif
 
                        del_gendisk(md->disk);
                }
@@ -2445,6 +2721,24 @@ static int mmc_add_disk(struct mmc_blk_data *md)
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                goto force_ro_fail;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+       atomic_set(&md->queue.max_write_speed, max_write_speed);
+       ret = device_create_file(disk_to_dev(md->disk),
+                       &dev_attr_max_write_speed);
+       if (ret)
+               goto max_write_speed_fail;
+       atomic_set(&md->queue.max_read_speed, max_read_speed);
+       ret = device_create_file(disk_to_dev(md->disk),
+                       &dev_attr_max_read_speed);
+       if (ret)
+               goto max_read_speed_fail;
+       atomic_set(&md->queue.cache_size, cache_size);
+       atomic_long_set(&md->queue.cache_used, 0);
+       md->queue.cache_jiffies = jiffies;
+       ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+       if (ret)
+               goto cache_size_fail;
+#endif
 
        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
             card->ext_csd.boot_ro_lockable) {
@@ -2469,6 +2763,14 @@ static int mmc_add_disk(struct mmc_blk_data *md)
        return ret;
 
 power_ro_lock_fail:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+       device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+cache_size_fail:
+       device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
+max_read_speed_fail:
+       device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
+max_write_speed_fail:
+#endif
        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 force_ro_fail:
        del_gendisk(md->disk);
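To make the throttling arithmetic above concrete (HZ = 100 is assumed purely for illustration): size_and_speed_to_jiffies(1048576, 2048) = 1048576 * 100 / 2048 / 1024 = 50 jiffies, so with max_write_speed set to 2048 KB/s a 1 MiB write that misses or overflows the simulated cache is padded with roughly 500 ms of msleep() in mmc_blk_simulate_delay(). The max_read_speed, max_write_speed and cache_size attributes are created on the gendisk device in mmc_add_disk(), so they should appear under the block device's sysfs directory (e.g. /sys/block/mmcblk0/max_write_speed on a typical board; the exact name depends on device enumeration).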
index 6f4323c6d6536c8855ca2a8bef8f8087bb5d252e..6a4cd2bb4629c4b945cf096574a9813a685a7d3a 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
+#include <linux/sched/rt.h>
 #include "queue.h"
 
 #define MMC_QUEUE_BOUNCESZ     65536
@@ -50,6 +51,11 @@ static int mmc_queue_thread(void *d)
 {
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
+       struct sched_param scheduler_params = {0};
+
+       scheduler_params.sched_priority = 1;
+
+       sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
 
        current->flags |= PF_MEMALLOC;
 
index cf30b3712cb20cbef8e2f65fbc8f6a379755e7d2..1dc4c99f52a17585f67820d428001a8ae6f7bb14 100644 (file)
@@ -58,6 +58,14 @@ struct mmc_queue {
        struct mmc_queue_req    mqrq[2];
        struct mmc_queue_req    *mqrq_cur;
        struct mmc_queue_req    *mqrq_prev;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+       atomic_t max_write_speed;
+       atomic_t max_read_speed;
+       atomic_t cache_size;
+       /* i/o tracking */
+       atomic_long_t cache_used;
+       unsigned long cache_jiffies;
+#endif
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
index 4c33d7690f2f66c4f8eb244480d06bce58acd3f1..87cc07dedd9f4191bbf80193d6336556f5d504c5 100644 (file)
@@ -1,3 +1,18 @@
 #
 # MMC core configuration
 #
+
+config MMC_EMBEDDED_SDIO
+       boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+       help
+         If you say Y here, support will be added for embedded SDIO
+         devices which do not contain the necessary enumeration
+         support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+       bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+       help
+         If you say Y here, the MMC layer will be extra paranoid
+         about re-trying SD init requests. This can be a useful
+         work-around for buggy controllers and hardware. Enable
+         if you are experiencing issues with SD detection.
index 5f7d10ba498a8b3183e620fcb5ce3873b969cb5f..2986e270d19a894a50492a1b2b7a724562559b48 100644 (file)
@@ -30,6 +30,9 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include "sd_ops.h"
 #include "sdio_ops.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_erase_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mmc_blk_rw_end);
+
 /* If the device is not responding */
 #define MMC_CORE_TIMEOUT_MS    (10 * 60 * 1000) /* 10 minute timeout */
 
@@ -175,6 +183,20 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                        pr_debug("%s:     %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
+#ifdef CONFIG_BLOCK
+                       if (mrq->lat_hist_enabled) {
+                               ktime_t completion;
+                               u_int64_t delta_us;
+
+                               completion = ktime_get();
+                               delta_us = ktime_us_delta(completion,
+                                                         mrq->io_start);
+                               blk_update_latency_hist(&host->io_lat_s,
+                                       (mrq->data->flags & MMC_DATA_READ),
+                                       delta_us);
+                       }
+#endif
+                       trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
                }
 
                if (mrq->stop) {
@@ -617,8 +639,19 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                }
        }
 
-       if (!err && areq)
+       if (!err && areq) {
+#ifdef CONFIG_BLOCK
+               if (host->latency_hist_enabled) {
+                       areq->mrq->io_start = ktime_get();
+                       areq->mrq->lat_hist_enabled = 1;
+               } else
+                       areq->mrq->lat_hist_enabled = 0;
+#endif
+               trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
+                                      areq->mrq->cmd->arg,
+                                      areq->mrq->data);
                start_err = __mmc_start_data_req(host, areq->mrq);
+       }
 
        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);
@@ -1951,7 +1984,7 @@ void mmc_init_erase(struct mmc_card *card)
 }
 
 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
-                                         unsigned int arg, unsigned int qty)
+                                         unsigned int arg, unsigned int qty)
 {
        unsigned int erase_timeout;
 
@@ -2055,8 +2088,13 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
        struct mmc_command cmd = {0};
        unsigned int qty = 0;
        unsigned long timeout;
+       unsigned int fr, nr;
        int err;
 
+       fr = from;
+       nr = to - from + 1;
+       trace_mmc_blk_erase_start(arg, fr, nr);
+
        mmc_retune_hold(card->host);
 
        /*
@@ -2163,6 +2201,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
 out:
        mmc_retune_release(card->host);
+       trace_mmc_blk_erase_end(arg, fr, nr);
        return err;
 }
 
@@ -2832,6 +2871,22 @@ void mmc_init_context_info(struct mmc_host *host)
        init_waitqueue_head(&host->context_info.wait);
 }
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+                               struct sdio_cis *cis,
+                               struct sdio_cccr *cccr,
+                               struct sdio_embedded_func *funcs,
+                               int num_funcs)
+{
+       host->embedded_sdio_data.cis = cis;
+       host->embedded_sdio_data.cccr = cccr;
+       host->embedded_sdio_data.funcs = funcs;
+       host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
 static int __init mmc_init(void)
 {
        int ret;
@@ -2872,6 +2927,56 @@ static void __exit mmc_exit(void)
        destroy_workqueue(workqueue);
 }
 
+#ifdef CONFIG_BLOCK
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+       return blk_latency_hist_show(&host->io_lat_s, buf);
+}
+
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct mmc_host *host = cls_dev_to_mmc_host(dev);
+       long value;
+
+       if (kstrtol(buf, 0, &value))
+               return -EINVAL;
+       if (value == BLK_IO_LAT_HIST_ZERO)
+               blk_zero_latency_hist(&host->io_lat_s);
+       else if (value == BLK_IO_LAT_HIST_ENABLE ||
+                value == BLK_IO_LAT_HIST_DISABLE)
+               host->latency_hist_enabled = value;
+       return count;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+                  latency_hist_show, latency_hist_store);
+
+void
+mmc_latency_hist_sysfs_init(struct mmc_host *host)
+{
+       if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
+               dev_err(&host->class_dev,
+                       "Failed to create latency_hist sysfs entry\n");
+}
+
+void
+mmc_latency_hist_sysfs_exit(struct mmc_host *host)
+{
+       device_remove_file(&host->class_dev, &dev_attr_latency_hist);
+}
+#endif
+
 subsys_initcall(mmc_init);
 module_exit(mmc_exit);
 
index da950c44204d27d6db8cd5a2d56b6b3e88e27c09..443fdfc22d8a7ec5783ab28410b28c81feec8806 100644 (file)
@@ -32,8 +32,6 @@
 #include "slot-gpio.h"
 #include "pwrseq.h"
 
-#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
-
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
@@ -394,8 +392,13 @@ int mmc_add_host(struct mmc_host *host)
        mmc_add_host_debugfs(host);
 #endif
 
+#ifdef CONFIG_BLOCK
+       mmc_latency_hist_sysfs_init(host);
+#endif
+
        mmc_start_host(host);
-       register_pm_notifier(&host->pm_notify);
+       if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+               register_pm_notifier(&host->pm_notify);
 
        return 0;
 }
@@ -412,13 +415,19 @@ EXPORT_SYMBOL(mmc_add_host);
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-       unregister_pm_notifier(&host->pm_notify);
+       if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+               unregister_pm_notifier(&host->pm_notify);
+
        mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
        mmc_remove_host_debugfs(host);
 #endif
 
+#ifdef CONFIG_BLOCK
+       mmc_latency_hist_sysfs_exit(host);
+#endif
+
        device_del(&host->class_dev);
 
        led_trigger_unregister_simple(host->led);
index 992bf53976337f0edf3f64fe4f9a007f4693b3a6..bf38533406fd88e27afdecee36d46928c9f0e47f 100644 (file)
@@ -12,6 +12,8 @@
 #define _MMC_CORE_HOST_H
 #include <linux/mmc/host.h>
 
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
@@ -21,5 +23,8 @@ void mmc_retune_hold(struct mmc_host *host);
 void mmc_retune_release(struct mmc_host *host);
 int mmc_retune(struct mmc_host *host);
 
+void mmc_latency_hist_sysfs_init(struct mmc_host *host);
+void mmc_latency_hist_sysfs_exit(struct mmc_host *host);
+
 #endif
 
index 967535d76e3468199979be0ffff23a7d0d3c2a93..ee145d4cc5418b232d07371c1fcaa4b1b2fdcdde 100644 (file)
@@ -809,6 +809,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
        bool reinit)
 {
        int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        if (!reinit) {
                /*
@@ -835,7 +838,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
                /*
                 * Fetch switch information from card.
                 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+               for (retries = 1; retries <= 3; retries++) {
+                       err = mmc_read_switch(card);
+                       if (!err) {
+                               if (retries > 1) {
+                                       printk(KERN_WARNING
+                                              "%s: recovered\n",
+                                              mmc_hostname(host));
+                               }
+                               break;
+                       } else {
+                               printk(KERN_WARNING
+                                      "%s: read switch failed (attempt %d)\n",
+                                      mmc_hostname(host), retries);
+                       }
+               }
+#else
                err = mmc_read_switch(card);
+#endif
+
                if (err)
                        return err;
        }
@@ -1033,7 +1055,10 @@ static int mmc_sd_alive(struct mmc_host *host)
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-       int err;
+       int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries = 5;
+#endif
 
        BUG_ON(!host);
        BUG_ON(!host->card);
@@ -1043,7 +1068,23 @@ static void mmc_sd_detect(struct mmc_host *host)
        /*
         * Just check if our card has been removed.
         */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       while(retries) {
+               err = mmc_send_status(host->card, NULL);
+               if (err) {
+                       retries--;
+                       udelay(5);
+                       continue;
+               }
+               break;
+       }
+       if (!retries) {
+               printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+                      __func__, mmc_hostname(host), err);
+       }
+#else
        err = _mmc_detect_card_removed(host);
+#endif
 
        mmc_put_card(host->card);
 
@@ -1105,6 +1146,9 @@ static int mmc_sd_suspend(struct mmc_host *host)
 static int _mmc_sd_resume(struct mmc_host *host)
 {
        int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        BUG_ON(!host);
        BUG_ON(!host->card);
@@ -1115,7 +1159,23 @@ static int _mmc_sd_resume(struct mmc_host *host)
                goto out;
 
        mmc_power_up(host, host->card->ocr);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       retries = 5;
+       while (retries) {
+               err = mmc_sd_init_card(host, host->card->ocr, host->card);
+
+               if (err) {
+                       printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+                              mmc_hostname(host), err, retries);
+                       mdelay(5);
+                       retries--;
+                       continue;
+               }
+               break;
+       }
+#else
        err = mmc_sd_init_card(host, host->card->ocr, host->card);
+#endif
        mmc_card_clr_suspended(host->card);
 
 out:
@@ -1201,6 +1261,9 @@ int mmc_attach_sd(struct mmc_host *host)
 {
        int err;
        u32 ocr, rocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        BUG_ON(!host);
        WARN_ON(!host->claimed);
@@ -1237,9 +1300,27 @@ int mmc_attach_sd(struct mmc_host *host)
        /*
         * Detect and init the card.
         */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       retries = 5;
+       while (retries) {
+               err = mmc_sd_init_card(host, rocr, NULL);
+               if (err) {
+                       retries--;
+                       continue;
+               }
+               break;
+       }
+
+       if (!retries) {
+               printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+                      mmc_hostname(host), err);
+               goto err;
+       }
+#else
        err = mmc_sd_init_card(host, rocr, NULL);
        if (err)
                goto err;
+#endif
 
        mmc_release_host(host);
        err = mmc_add_card(host->card);
index 467b3cf80c44549c704080d78540c46599eb6882..b47957122fd75348b70b9e64c757952ffe8c234e 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
@@ -21,6 +22,7 @@
 
 #include "core.h"
 #include "bus.h"
+#include "host.h"
 #include "sd.h"
 #include "sdio_bus.h"
 #include "mmc_ops.h"
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
        int ret;
@@ -699,19 +705,35 @@ try_again:
                goto finish;
        }
 
-       /*
-        * Read the common registers.
-        */
-       err = sdio_read_cccr(card, ocr);
-       if (err)
-               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.cccr)
+               memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+       else {
+#endif
+               /*
+                * Read the common registers.
+                */
+               err = sdio_read_cccr(card,  ocr);
+               if (err)
+                       goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       }
+#endif
 
-       /*
-        * Read the common CIS tuples.
-        */
-       err = sdio_read_common_cis(card);
-       if (err)
-               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.cis)
+               memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+       else {
+#endif
+               /*
+                * Read the common CIS tuples.
+                */
+               err = sdio_read_common_cis(card);
+               if (err)
+                       goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       }
+#endif
 
        if (oldcard) {
                int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1120,14 +1142,36 @@ int mmc_attach_sdio(struct mmc_host *host)
        funcs = (ocr & 0x70000000) >> 28;
        card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.funcs)
+               card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
        /*
         * Initialize (but don't add) all present functions.
         */
        for (i = 0; i < funcs; i++, card->sdio_funcs++) {
-               err = sdio_init_func(host->card, i + 1);
-               if (err)
-                       goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+               if (host->embedded_sdio_data.funcs) {
+                       struct sdio_func *tmp;
+
+                       tmp = sdio_alloc_func(host->card);
+                       if (IS_ERR(tmp))
+                               goto remove;
+                       tmp->num = (i + 1);
+                       card->sdio_func[i] = tmp;
+                       tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+                       tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+                       tmp->vendor = card->cis.vendor;
+                       tmp->device = card->cis.device;
+               } else {
+#endif
+                       err = sdio_init_func(host->card, i + 1);
+                       if (err)
+                               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+               }
+#endif
                /*
                 * Enable Runtime PM for this func (if supported)
                 */
@@ -1175,3 +1219,42 @@ err:
        return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+       struct mmc_host *host = card->host;
+       u32 ocr;
+       u32 rocr;
+       int err;
+
+       printk("%s():\n", __func__);
+       mmc_claim_host(host);
+
+       mmc_retune_disable(host);
+
+       mmc_go_idle(host);
+
+       mmc_set_clock(host, host->f_min);
+
+       err = mmc_send_io_op_cond(host, 0, &ocr);
+       if (err)
+               goto err;
+
+       rocr = mmc_select_voltage(host, ocr);
+       if (!rocr) {
+               err = -EINVAL;
+               goto err;
+       }
+
+       err = mmc_sdio_init_card(host, rocr, card, 0);
+       if (err)
+               goto err;
+
+       mmc_release_host(host);
+       return 0;
+err:
+       printk("%s: Error resetting SDIO communications (%d)\n",
+              mmc_hostname(host), err);
+       mmc_release_host(host);
+       return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
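sdio_reset_comm() above is exported so that an SDIO function driver (commonly an out-of-tree WLAN driver) can re-run the CMD5/voltage/init handshake after the card has lost state, for example across a deep-sleep cycle. A minimal, hypothetical call site follows; the declaration is assumed to live in linux/mmc/sdio_func.h in this tree.

	/* Hypothetical use from an SDIO function driver's resume path. */
	static int example_wifi_resume(struct sdio_func *func)
	{
		int err = sdio_reset_comm(func->card);

		if (err)
			dev_err(&func->dev, "SDIO reset failed: %d\n", err);
		return err;
	}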
index 7e327a6dd53da304cc59b9012e311504bfaf67df..e32ed3d28b067e79516e0e51dc64d2247fd5604f 100644 (file)
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 #define to_sdio_driver(d)      container_of(d, struct sdio_driver, drv)
 
 /* show configuration fields */
@@ -263,7 +267,14 @@ static void sdio_release_func(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
 
-       sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       /*
+        * If this device is embedded then we never allocated
+        * cis tables for this func
+        */
+       if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+               sdio_free_func_cis(func);
 
        kfree(func->info);
 
index 78cb4d5d9d58184754973a5e3eee1c64a635deb5..8fdeb07723a67b49c94f2f79e75e904713170ae9 100644 (file)
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
 }
 EXPORT_SYMBOL_GPL(sdio_readb);
 
+/**
+ *     sdio_readb_ext - read a single byte from a SDIO function
+ *     @func: SDIO function to access
+ *     @addr: address to read
+ *     @err_ret: optional status value from transfer
+ *     @in: value to add to argument
+ *
+ *     Reads a single byte from the address space of a given SDIO
+ *     function. If there is a problem reading the address, 0xff
+ *     is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+       int *err_ret, unsigned in)
+{
+       int ret;
+       unsigned char val;
+
+       BUG_ON(!func);
+
+       if (err_ret)
+               *err_ret = 0;
+
+       ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+       if (ret) {
+               if (err_ret)
+                       *err_ret = ret;
+               return 0xFF;
+       }
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
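A call-site sketch (hypothetical register name and driver context): sdio_readb_ext() is used exactly like sdio_readb(), with the extra byte carried in the CMD52 argument of the read, as the kerneldoc above puts it, a "value to add to argument":

  int err;
  u8 status;

  /* EXAMPLE_STATUS_REG is a made-up register offset for illustration. */
  status = sdio_readb_ext(func, EXAMPLE_STATUS_REG, &err, 0x01);
  if (err)
          dev_err(&func->dev, "status read failed: %d\n", err);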
 /**
  *     sdio_writeb - write a single byte to a SDIO function
  *     @func: SDIO function to access
index 289664089cf32ce9770baefba951e089f0681a11..093fd917274f8db4d4d4228036c2bcf868387669 100644 (file)
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+       tristate "Include chip ids for known NAND devices."
+       depends on MTD
+       help
+         Useful for NAND drivers that do not use the NAND subsystem but
+         still like to take advantage of the known chip information.
+
 config MTD_NAND_ECC
        tristate
 
@@ -108,9 +115,6 @@ config MTD_NAND_OMAP_BCH
 config MTD_NAND_OMAP_BCH_BUILD
        def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
 
-config MTD_NAND_IDS
-       tristate
-
 config MTD_NAND_RICOH
        tristate "Ricoh xD card reader"
        default n
index 1373c6d7278d84dbcfb84a936745e01d8273aa44..282aec4860ebe56b6097a3d6a20da7725c103e2c 100644 (file)
@@ -149,6 +149,23 @@ config PPPOL2TP
          tunnels. L2TP is replacing PPTP for VPN uses.
 if TTY
 
+config PPPOLAC
+       tristate "PPP on L2TP Access Concentrator"
+       depends on PPP && INET
+       help
+         L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+         networks. This driver handles L2TP data packets between a UDP socket
+         and a PPP channel, but only permits one session per socket. Thus it is
+         fairly simple and suited for clients.
+
+config PPPOPNS
+       tristate "PPP on PPTP Network Server"
+       depends on PPP && INET
+       help
+         PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+         networks. This driver handles PPTP data packets between a RAW socket
+         and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
        tristate "PPP support for async serial ports"
        depends on PPP
index a6b6297b00669a3f1e0852f83780550a295e3216..d283d03c4683a9daa8f27f592d3f990ea8af0b79 100644 (file)
@@ -11,3 +11,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
index 174e06ec7c2fe88014ee77c8a60e99788e459072..e5bb870b5461aa9afd81b1a41f36bbf6f4a58234 100644 (file)
@@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
        spin_lock_bh(&pn->all_channels_lock);
        list_del(&pch->list);
        spin_unlock_bh(&pn->all_channels_lock);
-       put_net(pch->chan_net);
-       pch->chan_net = NULL;
 
        pch->file.dead = 1;
        wake_up_interruptible(&pch->file.rwait);
@@ -2984,6 +2982,9 @@ ppp_disconnect_channel(struct channel *pch)
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+       put_net(pch->chan_net);
+       pch->chan_net = NULL;
+
        atomic_dec(&channel_count);
 
        if (!pch->file.dead) {
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644 (file)
index 0000000..0184c96
--- /dev/null
@@ -0,0 +1,448 @@
+/* drivers/net/ppp/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT       0x80
+#define L2TP_LENGTH_BIT                0x40
+#define L2TP_SEQUENCE_BIT      0x08
+#define L2TP_OFFSET_BIT                0x02
+#define L2TP_VERSION           0x02
+#define L2TP_VERSION_MASK      0x0F
+
+#define PPP_ADDR       0xFF
+#define PPP_CTRL       0x03
+
+union unaligned {
+       __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+       return (union unaligned *)ptr;
+}
+
+struct meta {
+       __u32 sequence;
+       __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+       return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+       struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+       struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+       struct meta *meta = skb_meta(skb);
+       __u32 now = jiffies;
+       __u8 bits;
+       __u8 *ptr;
+
+       /* Drop the packet if L2TP header is missing. */
+       if (skb->len < sizeof(struct udphdr) + 6)
+               goto drop;
+
+       /* Put it back if it is a control packet. */
+       if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+               return opt->backlog_rcv(sk_udp, skb);
+
+       /* Skip UDP header. */
+       skb_pull(skb, sizeof(struct udphdr));
+
+       /* Check the version. */
+       if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+               goto drop;
+       bits = skb->data[0];
+       ptr = &skb->data[2];
+
+       /* Check the length if it is present. */
+       if (bits & L2TP_LENGTH_BIT) {
+               if ((ptr[0] << 8 | ptr[1]) != skb->len)
+                       goto drop;
+               ptr += 2;
+       }
+
+       /* Skip all fields including optional ones. */
+       if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+                       (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+                       (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+               goto drop;
+
+       /* Skip the offset padding if it is present. */
+       if (bits & L2TP_OFFSET_BIT &&
+                       !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+               goto drop;
+
+       /* Check the tunnel and the session. */
+       if (unaligned(ptr)->u32 != opt->local)
+               goto drop;
+
+       /* Check the sequence if it is present. */
+       if (bits & L2TP_SEQUENCE_BIT) {
+               meta->sequence = ptr[4] << 8 | ptr[5];
+               if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+                       goto drop;
+       }
+
+       /* Skip PPP address and control if they are present. */
+       if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+                       skb->data[1] == PPP_CTRL)
+               skb_pull(skb, 2);
+
+       /* Fix PPP protocol if it is compressed. */
+       if (skb->len >= 1 && skb->data[0] & 1)
+               skb_push(skb, 1)[0] = 0;
+
+       /* Drop the packet if PPP protocol is missing. */
+       if (skb->len < 2)
+               goto drop;
+
+       /* Perform reordering if sequencing is enabled. */
+       atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+       if (bits & L2TP_SEQUENCE_BIT) {
+               struct sk_buff *skb1;
+
+               /* Insert the packet into receive queue in order. */
+               skb_set_owner_r(skb, sk);
+               skb_queue_walk(&sk->sk_receive_queue, skb1) {
+                       struct meta *meta1 = skb_meta(skb1);
+                       __s16 order = meta->sequence - meta1->sequence;
+                       if (order == 0)
+                               goto drop;
+                       if (order < 0) {
+                               meta->timestamp = meta1->timestamp;
+                               skb_insert(skb1, skb, &sk->sk_receive_queue);
+                               skb = NULL;
+                               break;
+                       }
+               }
+               if (skb) {
+                       meta->timestamp = now;
+                       skb_queue_tail(&sk->sk_receive_queue, skb);
+               }
+
+               /* Remove packets from receive queue as long as
+                * 1. the receive buffer is full,
+                * 2. they are queued longer than one second, or
+                * 3. there are no missing packets before them. */
+               skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+                       meta = skb_meta(skb);
+                       if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+                                       now - meta->timestamp < HZ &&
+                                       meta->sequence != opt->recv_sequence)
+                               break;
+                       skb_unlink(skb, &sk->sk_receive_queue);
+                       opt->recv_sequence = (__u16)(meta->sequence + 1);
+                       skb_orphan(skb);
+                       ppp_input(&pppox_sk(sk)->chan, skb);
+               }
+               return NET_RX_SUCCESS;
+       }
+
+       /* Flush receive queue if sequencing is disabled. */
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_orphan(skb);
+       ppp_input(&pppox_sk(sk)->chan, skb);
+       return NET_RX_SUCCESS;
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+       sock_hold(sk_udp);
+       sk_receive_skb(sk_udp, skb, 0);
+       return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+       mm_segment_t old_fs = get_fs();
+       struct sk_buff *skb;
+
+       set_fs(KERNEL_DS);
+       while ((skb = skb_dequeue(&delivery_queue))) {
+               struct sock *sk_udp = skb->sk;
+               struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+               struct msghdr msg = { 0 };
+
+               iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+                             skb->len);
+               sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len);
+               kfree_skb(skb);
+       }
+       set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+       struct sock *sk_udp = (struct sock *)chan->private;
+       struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+       /* Install PPP address and control. */
+       skb_push(skb, 2);
+       skb->data[0] = PPP_ADDR;
+       skb->data[1] = PPP_CTRL;
+
+       /* Install L2TP header. */
+       if (atomic_read(&opt->sequencing)) {
+               skb_push(skb, 10);
+               skb->data[0] = L2TP_SEQUENCE_BIT;
+               skb->data[6] = opt->xmit_sequence >> 8;
+               skb->data[7] = opt->xmit_sequence;
+               skb->data[8] = 0;
+               skb->data[9] = 0;
+               opt->xmit_sequence++;
+       } else {
+               skb_push(skb, 6);
+               skb->data[0] = 0;
+       }
+       skb->data[1] = L2TP_VERSION;
+       unaligned(&skb->data[2])->u32 = opt->remote;
+
+       /* Now send the packet via the delivery queue. */
+       skb_set_owner_w(skb, sk_udp);
+       skb_queue_tail(&delivery_queue, skb);
+       schedule_work(&delivery_work);
+       return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+       .start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+       int addrlen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct pppox_sock *po = pppox_sk(sk);
+       struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+       struct socket *sock_udp = NULL;
+       struct sock *sk_udp;
+       int error;
+
+       if (addrlen != sizeof(struct sockaddr_pppolac) ||
+                       !addr->local.tunnel || !addr->local.session ||
+                       !addr->remote.tunnel || !addr->remote.session) {
+               return -EINVAL;
+       }
+
+       lock_sock(sk);
+       error = -EALREADY;
+       if (sk->sk_state != PPPOX_NONE)
+               goto out;
+
+       sock_udp = sockfd_lookup(addr->udp_socket, &error);
+       if (!sock_udp)
+               goto out;
+       sk_udp = sock_udp->sk;
+       lock_sock(sk_udp);
+
+       /* Remove this check when IPv6 supports UDP encapsulation. */
+       error = -EAFNOSUPPORT;
+       if (sk_udp->sk_family != AF_INET)
+               goto out;
+       error = -EPROTONOSUPPORT;
+       if (sk_udp->sk_protocol != IPPROTO_UDP)
+               goto out;
+       error = -EDESTADDRREQ;
+       if (sk_udp->sk_state != TCP_ESTABLISHED)
+               goto out;
+       error = -EBUSY;
+       if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+               goto out;
+       if (!sk_udp->sk_bound_dev_if) {
+               struct dst_entry *dst = sk_dst_get(sk_udp);
+               error = -ENODEV;
+               if (!dst)
+                       goto out;
+               sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+               dst_release(dst);
+       }
+
+       po->chan.hdrlen = 12;
+       po->chan.private = sk_udp;
+       po->chan.ops = &pppolac_channel_ops;
+       po->chan.mtu = PPP_MRU - 80;
+       po->proto.lac.local = unaligned(&addr->local)->u32;
+       po->proto.lac.remote = unaligned(&addr->remote)->u32;
+       atomic_set(&po->proto.lac.sequencing, 1);
+       po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+       error = ppp_register_channel(&po->chan);
+       if (error)
+               goto out;
+
+       sk->sk_state = PPPOX_CONNECTED;
+       udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+       udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+       sk_udp->sk_backlog_rcv = pppolac_recv_core;
+       sk_udp->sk_user_data = sk;
+out:
+       if (sock_udp) {
+               release_sock(sk_udp);
+               if (error)
+                       sockfd_put(sock_udp);
+       }
+       release_sock(sk);
+       return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD)) {
+               release_sock(sk);
+               return -EBADF;
+       }
+
+       if (sk->sk_state != PPPOX_NONE) {
+               struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+               lock_sock(sk_udp);
+               skb_queue_purge(&sk->sk_receive_queue);
+               pppox_unbind_sock(sk);
+               udp_sk(sk_udp)->encap_type = 0;
+               udp_sk(sk_udp)->encap_rcv = NULL;
+               sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+               sk_udp->sk_user_data = NULL;
+               release_sock(sk_udp);
+               sockfd_put(sk_udp->sk_socket);
+       }
+
+       sock_orphan(sk);
+       sock->sk = NULL;
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+       .name = "PPPOLAC",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+       .family = PF_PPPOX,
+       .owner = THIS_MODULE,
+       .release = pppolac_release,
+       .bind = sock_no_bind,
+       .connect = pppolac_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = sock_no_accept,
+       .getname = sock_no_getname,
+       .poll = sock_no_poll,
+       .ioctl = pppox_ioctl,
+       .listen = sock_no_listen,
+       .shutdown = sock_no_shutdown,
+       .setsockopt = sock_no_setsockopt,
+       .getsockopt = sock_no_getsockopt,
+       .sendmsg = sock_no_sendmsg,
+       .recvmsg = sock_no_recvmsg,
+       .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock, int kern)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto, kern);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_init_data(sock, sk);
+       sock->state = SS_UNCONNECTED;
+       sock->ops = &pppolac_proto_ops;
+       sk->sk_protocol = PX_PROTO_OLAC;
+       sk->sk_state = PPPOX_NONE;
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+       .create = pppolac_create,
+       .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+       int error;
+
+       error = proto_register(&pppolac_proto, 0);
+       if (error)
+               return error;
+
+       error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+       if (error)
+               proto_unregister(&pppolac_proto);
+       else
+               skb_queue_head_init(&delivery_queue);
+       return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+       unregister_pppox_proto(PX_PROTO_OLAC);
+       proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
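
For orientation, a rough user-space sketch of how a daemon hands a connected UDP socket to this driver. The code is hypothetical: the sockaddr_pppolac layout is defined in this tree's if_pppox.h and only the field names used by pppolac_connect() above are assumed here; tunnel and session IDs are passed as they appear on the wire (network byte order), since the driver compares them against the raw L2TP header words.

  #include <stdint.h>
  #include <unistd.h>
  #include <sys/socket.h>
  #include <arpa/inet.h>
  #include <linux/if_pppox.h>

  /* Sketch only: returns a PPP channel fd on success, -1 on error. */
  int pppolac_attach(int udp_fd,
                     uint16_t local_tunnel, uint16_t local_session,
                     uint16_t remote_tunnel, uint16_t remote_session)
  {
          struct sockaddr_pppolac addr = {
                  .sa_family   = AF_PPPOX,
                  .sa_protocol = PX_PROTO_OLAC,
                  .udp_socket  = udp_fd,   /* connected UDP socket to the LNS */
                  .local  = { .tunnel = htons(local_tunnel),
                              .session = htons(local_session) },
                  .remote = { .tunnel = htons(remote_tunnel),
                              .session = htons(remote_session) },
          };
          int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

          if (fd < 0)
                  return -1;
          if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                  close(fd);
                  return -1;
          }
          return fd;   /* channel number is then queried with PPPIOCGCHAN */
  }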
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644 (file)
index 0000000..d9e0603
--- /dev/null
@@ -0,0 +1,427 @@
+/* drivers/net/ppp/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address as
+ * the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE                8
+
+#define PPTP_GRE_BITS          htons(0x2001)
+#define PPTP_GRE_BITS_MASK     htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT       htons(0x1000)
+#define PPTP_GRE_ACK_BIT       htons(0x0080)
+#define PPTP_GRE_TYPE          htons(0x880B)
+
+#define PPP_ADDR       0xFF
+#define PPP_CTRL       0x03
+
+struct header {
+       __u16   bits;
+       __u16   type;
+       __u16   length;
+       __u16   call;
+       __u32   sequence;
+} __attribute__((packed));
+
+struct meta {
+       __u32 sequence;
+       __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+       return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+       struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+       struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+       struct meta *meta = skb_meta(skb);
+       __u32 now = jiffies;
+       struct header *hdr;
+
+       /* Skip transport header */
+       skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+       /* Drop the packet if GRE header is missing. */
+       if (skb->len < GRE_HEADER_SIZE)
+               goto drop;
+       hdr = (struct header *)skb->data;
+
+       /* Check the header. */
+       if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+                       (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+               goto drop;
+
+       /* Skip all fields including optional ones. */
+       if (!skb_pull(skb, GRE_HEADER_SIZE +
+                       (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+                       (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+               goto drop;
+
+       /* Check the length. */
+       if (skb->len != ntohs(hdr->length))
+               goto drop;
+
+       /* Check the sequence if it is present. */
+       if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+               meta->sequence = ntohl(hdr->sequence);
+               if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+                       goto drop;
+       }
+
+       /* Skip PPP address and control if they are present. */
+       if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+                       skb->data[1] == PPP_CTRL)
+               skb_pull(skb, 2);
+
+       /* Fix PPP protocol if it is compressed. */
+       if (skb->len >= 1 && skb->data[0] & 1)
+               skb_push(skb, 1)[0] = 0;
+
+       /* Drop the packet if PPP protocol is missing. */
+       if (skb->len < 2)
+               goto drop;
+
+       /* Perform reordering if sequencing is enabled. */
+       if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+               struct sk_buff *skb1;
+
+               /* Insert the packet into receive queue in order. */
+               skb_set_owner_r(skb, sk);
+               skb_queue_walk(&sk->sk_receive_queue, skb1) {
+                       struct meta *meta1 = skb_meta(skb1);
+                       __s32 order = meta->sequence - meta1->sequence;
+                       if (order == 0)
+                               goto drop;
+                       if (order < 0) {
+                               meta->timestamp = meta1->timestamp;
+                               skb_insert(skb1, skb, &sk->sk_receive_queue);
+                               skb = NULL;
+                               break;
+                       }
+               }
+               if (skb) {
+                       meta->timestamp = now;
+                       skb_queue_tail(&sk->sk_receive_queue, skb);
+               }
+
+               /* Remove packets from receive queue as long as
+                * 1. the receive buffer is full,
+                * 2. they are queued longer than one second, or
+                * 3. there are no missing packets before them. */
+               skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+                       meta = skb_meta(skb);
+                       if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+                                       now - meta->timestamp < HZ &&
+                                       meta->sequence != opt->recv_sequence)
+                               break;
+                       skb_unlink(skb, &sk->sk_receive_queue);
+                       opt->recv_sequence = meta->sequence + 1;
+                       skb_orphan(skb);
+                       ppp_input(&pppox_sk(sk)->chan, skb);
+               }
+               return NET_RX_SUCCESS;
+       }
+
+       /* Flush receive queue if sequencing is disabled. */
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_orphan(skb);
+       ppp_input(&pppox_sk(sk)->chan, skb);
+       return NET_RX_SUCCESS;
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw)
+{
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+               sock_hold(sk_raw);
+               sk_receive_skb(sk_raw, skb, 0);
+       }
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+       mm_segment_t old_fs = get_fs();
+       struct sk_buff *skb;
+
+       set_fs(KERNEL_DS);
+       while ((skb = skb_dequeue(&delivery_queue))) {
+               struct sock *sk_raw = skb->sk;
+               struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+               struct msghdr msg = { 0 };
+
+               iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+                             skb->len);
+               sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len);
+               kfree_skb(skb);
+       }
+       set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+       struct sock *sk_raw = (struct sock *)chan->private;
+       struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+       struct header *hdr;
+       __u16 length;
+
+       /* Install PPP address and control. */
+       skb_push(skb, 2);
+       skb->data[0] = PPP_ADDR;
+       skb->data[1] = PPP_CTRL;
+       length = skb->len;
+
+       /* Install PPTP GRE header. */
+       hdr = (struct header *)skb_push(skb, 12);
+       hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+       hdr->type = PPTP_GRE_TYPE;
+       hdr->length = htons(length);
+       hdr->call = opt->remote;
+       hdr->sequence = htonl(opt->xmit_sequence);
+       opt->xmit_sequence++;
+
+       /* Now send the packet via the delivery queue. */
+       skb_set_owner_w(skb, sk_raw);
+       skb_queue_tail(&delivery_queue, skb);
+       schedule_work(&delivery_work);
+       return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+       .start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+       int addrlen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct pppox_sock *po = pppox_sk(sk);
+       struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+       struct sockaddr_storage ss;
+       struct socket *sock_tcp = NULL;
+       struct socket *sock_raw = NULL;
+       struct sock *sk_tcp;
+       struct sock *sk_raw;
+       int error;
+
+       if (addrlen != sizeof(struct sockaddr_pppopns))
+               return -EINVAL;
+
+       lock_sock(sk);
+       error = -EALREADY;
+       if (sk->sk_state != PPPOX_NONE)
+               goto out;
+
+       sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+       if (!sock_tcp)
+               goto out;
+       sk_tcp = sock_tcp->sk;
+       error = -EPROTONOSUPPORT;
+       if (sk_tcp->sk_protocol != IPPROTO_TCP)
+               goto out;
+       addrlen = sizeof(struct sockaddr_storage);
+       error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+       if (error)
+               goto out;
+       if (!sk_tcp->sk_bound_dev_if) {
+               struct dst_entry *dst = sk_dst_get(sk_tcp);
+               error = -ENODEV;
+               if (!dst)
+                       goto out;
+               sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+               dst_release(dst);
+       }
+
+       error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+       if (error)
+               goto out;
+       sk_raw = sock_raw->sk;
+       sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+       error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+       if (error)
+               goto out;
+
+       po->chan.hdrlen = 14;
+       po->chan.private = sk_raw;
+       po->chan.ops = &pppopns_channel_ops;
+       po->chan.mtu = PPP_MRU - 80;
+       po->proto.pns.local = addr->local;
+       po->proto.pns.remote = addr->remote;
+       po->proto.pns.data_ready = sk_raw->sk_data_ready;
+       po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+       error = ppp_register_channel(&po->chan);
+       if (error)
+               goto out;
+
+       sk->sk_state = PPPOX_CONNECTED;
+       lock_sock(sk_raw);
+       sk_raw->sk_data_ready = pppopns_recv;
+       sk_raw->sk_backlog_rcv = pppopns_recv_core;
+       sk_raw->sk_user_data = sk;
+       release_sock(sk_raw);
+out:
+       if (sock_tcp)
+               sockfd_put(sock_tcp);
+       if (error && sock_raw)
+               sock_release(sock_raw);
+       release_sock(sk);
+       return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD)) {
+               release_sock(sk);
+               return -EBADF;
+       }
+
+       if (sk->sk_state != PPPOX_NONE) {
+               struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+               lock_sock(sk_raw);
+               skb_queue_purge(&sk->sk_receive_queue);
+               pppox_unbind_sock(sk);
+               sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+               sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+               sk_raw->sk_user_data = NULL;
+               release_sock(sk_raw);
+               sock_release(sk_raw->sk_socket);
+       }
+
+       sock_orphan(sk);
+       sock->sk = NULL;
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+       .name = "PPPOPNS",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+       .family = PF_PPPOX,
+       .owner = THIS_MODULE,
+       .release = pppopns_release,
+       .bind = sock_no_bind,
+       .connect = pppopns_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = sock_no_accept,
+       .getname = sock_no_getname,
+       .poll = sock_no_poll,
+       .ioctl = pppox_ioctl,
+       .listen = sock_no_listen,
+       .shutdown = sock_no_shutdown,
+       .setsockopt = sock_no_setsockopt,
+       .getsockopt = sock_no_getsockopt,
+       .sendmsg = sock_no_sendmsg,
+       .recvmsg = sock_no_recvmsg,
+       .mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock, int kern)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto, kern);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_init_data(sock, sk);
+       sock->state = SS_UNCONNECTED;
+       sock->ops = &pppopns_proto_ops;
+       sk->sk_protocol = PX_PROTO_OPNS;
+       sk->sk_state = PPPOX_NONE;
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+       .create = pppopns_create,
+       .owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+       int error;
+
+       error = proto_register(&pppopns_proto, 0);
+       if (error)
+               return error;
+
+       error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+       if (error)
+               proto_unregister(&pppopns_proto);
+       else
+               skb_queue_head_init(&delivery_queue);
+       return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+       unregister_pppox_proto(PX_PROTO_OPNS);
+       proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
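
Both pppolac and pppopns rely on the same serial-number idiom for their "is this packet older than expected?" test: subtract in the sequence counter's native width and look at the sign, so wrap-around needs no special casing. A standalone sketch of the 16-bit variant used by pppolac (pppopns does the same with __s32):

  #include <stdint.h>
  #include <stdio.h>

  /* Nonzero if sequence a is older than b, modulo 2^16 -- the same test as
   * (__s16)(meta->sequence - opt->recv_sequence) < 0 in pppolac_recv_core(). */
  static int seq16_before(uint16_t a, uint16_t b)
  {
          return (int16_t)(a - b) < 0;
  }

  int main(void)
  {
          printf("%d\n", seq16_before(5, 10));     /* 1: 5 is older than 10 */
          printf("%d\n", seq16_before(10, 5));     /* 0: 10 is newer */
          printf("%d\n", seq16_before(65530, 3));  /* 1: older across the wrap */
          return 0;
  }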
index 111b972e3053eab7ce299721165d8888494a7059..6941f0888b0023e1eb0f4ca3647ee39e241e1776 100644 (file)
@@ -862,10 +862,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                goto drop;
 
-       if (skb->sk && sk_fullsock(skb->sk)) {
-               sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
-               sw_tx_timestamp(skb);
-       }
+       skb_tx_timestamp(skb);
 
        /* Orphan the skb - required as we might hang on to it
         * for indefinite time.
@@ -1888,6 +1885,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        int le;
        int ret;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+       if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+               return -EPERM;
+       }
+#endif
+
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
index 7e74ac3ad81519491ac01460cd3b578e6fb3cc80..bcf29bf6f727be6b16a52da24f1213b5e73d2fb4 100644 (file)
@@ -3401,10 +3401,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
                goto err;
        }
 
-       /* Allow full data communication using DPC from now on. */
-       brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
-       bcmerror = 0;
-
 err:
        brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
        sdio_release_host(bus->sdiodev->func[1]);
@@ -4112,6 +4108,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
        }
 
        if (err == 0) {
+               /* Allow full data communication using DPC from now on. */
+               brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+
                err = brcmf_sdiod_intr_register(sdiodev);
                if (err != 0)
                        brcmf_err("intr register failed:%d\n", err);
index e92f2639af2c8835d5c2faddd2c5630e99f89067..9fd3c6af0a6175f047eeb5d04e4ed002a4726e7e 100644 (file)
@@ -549,6 +549,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
+       /* Disable filtering */
+       ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0);
+       if (ret < 0)
+               return ret;
+
        ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
        if (ret < 0)
                return ret;
index 3e90bce70545a759b415081050d0762f4ae3c640..c6d196188bc9f06b0da4d22ec50fdaaf157d9601 100644 (file)
@@ -946,36 +946,66 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
        return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
                                     int depth, void *data)
 {
-       int l;
-       const char *p;
+       int l = 0;
+       const char *p = NULL;
+       char *cmdline = data;
 
        pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-       if (depth != 1 || !data ||
+       if (depth != 1 || !cmdline ||
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;
 
        early_init_dt_check_for_initrd(node);
 
-       /* Retrieve command line */
-       p = of_get_flat_dt_prop(node, "bootargs", &l);
-       if (p != NULL && l > 0)
-               strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
-
-       /*
-        * CONFIG_CMDLINE is meant to be a default in case nothing else
-        * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-        * is set in which case we override whatever was found earlier.
-        */
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-       if (!((char *)data)[0])
-#endif
-               strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
+       /* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */
+       if (overwrite_incoming_cmdline || !cmdline[0])
+               strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
+
+       /* Retrieve command line unless forcing */
+       if (read_dt_cmdline)
+               p = of_get_flat_dt_prop(node, "bootargs", &l);
+
+       if (p != NULL && l > 0) {
+               if (concat_cmdline) {
+                       int cmdline_len;
+                       int copy_len;
+                       strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+                       cmdline_len = strlen(cmdline);
+                       copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+                       copy_len = min((int)l, copy_len);
+                       strncpy(cmdline + cmdline_len, p, copy_len);
+                       cmdline[cmdline_len + copy_len] = '\0';
+               } else {
+                       strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE));
+               }
+       }
 
        pr_debug("Command line is: %s\n", (char*)data);
 
index 0adccbf5c83f116bd698c392c47b75a9197f892f..5bd93a25d2ce75d118e565f230963784bb409a06 100644 (file)
@@ -4,8 +4,6 @@ endif
 if MIPS
 source "drivers/platform/mips/Kconfig"
 endif
-if GOLDFISH
 source "drivers/platform/goldfish/Kconfig"
-endif
 
 source "drivers/platform/chrome/Kconfig"
index 635ef25cc722a0c01ee13fd95ddfe78b6bbd6f39..50331e3e54f31f87ed476e27f185b2f2c892931b 100644 (file)
@@ -1,5 +1,23 @@
+menuconfig GOLDFISH
+       bool "Platform support for Goldfish virtual devices"
+       depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+       ---help---
+         Say Y here to get to see options for the Goldfish virtual platform.
+         This option alone does not add any kernel code.
+
+         Unless you are building for the Android Goldfish emulator say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+       bool "Goldfish platform bus"
+       ---help---
+         This is a virtual bus to host Goldfish Android Virtual Devices.
+
 config GOLDFISH_PIPE
        tristate "Goldfish virtual device for QEMU pipes"
        ---help---
          This is a virtual device to drive the QEMU pipe interface used by
          the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
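
With the bus support split out into its own symbol, an emulator kernel config now selects the pieces explicitly, for example:

  CONFIG_GOLDFISH=y
  CONFIG_GOLDFISH_BUS=y
  CONFIG_GOLDFISH_PIPE=y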
index a0022395eee93f38c6a981cc9145cbe5ae8774f4..277a820ee4e11c2477060eb7af79936a2a5a4f15 100644 (file)
@@ -1,5 +1,6 @@
 #
 # Makefile for Goldfish platform specific drivers
 #
-obj-$(CONFIG_GOLDFISH) +=      pdev_bus.o
-obj-$(CONFIG_GOLDFISH_PIPE)    += goldfish_pipe.o
+obj-$(CONFIG_GOLDFISH_BUS)     += pdev_bus.o
+obj-$(CONFIG_GOLDFISH_PIPE)    += goldfish_pipe_all.o
+goldfish_pipe_all-objs := goldfish_pipe.o goldfish_pipe_v2.o
index e7a29e2750c6aeb274047b37a9d32eb3fe70961d..fd1452e283522d0e79943e9fa85db57351a32221 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (C) 2011 Google, Inc.
  * Copyright (C) 2012 Intel, Inc.
  * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  *
  */
 
-/* This source file contains the implementation of a special device driver
- * that intends to provide a *very* fast communication channel between the
- * guest system and the QEMU emulator.
- *
- * Usage from the guest is simply the following (error handling simplified):
- *
- *    int  fd = open("/dev/qemu_pipe",O_RDWR);
- *    .... write() or read() through the pipe.
- *
- * This driver doesn't deal with the exact protocol used during the session.
- * It is intended to be as simple as something like:
- *
- *    // do this _just_ after opening the fd to connect to a specific
- *    // emulator service.
- *    const char*  msg = "<pipename>";
- *    if (write(fd, msg, strlen(msg)+1) < 0) {
- *       ... could not connect to <pipename> service
- *       close(fd);
- *    }
- *
- *    // after this, simply read() and write() to communicate with the
- *    // service. Exact protocol details left as an exercise to the reader.
- *
- * This driver is very fast because it doesn't copy any data through
- * intermediate buffers, since the emulator is capable of translating
- * guest user addresses into host ones.
- *
- * Note that we must however ensure that each user page involved in the
- * exchange is properly mapped during a transfer.
+/* This source file contains the implementation of the legacy version of
+ * a goldfish pipe device driver. See goldfish_pipe_v2.c for the current
+ * version.
  */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/bitops.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/goldfish.h>
+#include "goldfish_pipe.h"
 
 /*
  * IMPORTANT: The following constants must match the ones used and defined
@@ -75,6 +38,7 @@
 #define PIPE_REG_PARAMS_ADDR_LOW       0x18  /* read/write: batch data address */
 #define PIPE_REG_PARAMS_ADDR_HIGH      0x1c  /* read/write: batch data address */
 #define PIPE_REG_ACCESS_PARAMS         0x20  /* write: batch access */
+#define PIPE_REG_VERSION               0x24  /* read: device version */
 
 /* list of commands for PIPE_REG_COMMAND */
 #define CMD_OPEN                       1  /* open new channel */
 #define CMD_WRITE_BUFFER       4  /* send a user buffer to the emulator */
 #define CMD_WAKE_ON_WRITE      5  /* tell the emulator to wake us when writing
                                     is possible */
-
-/* The following commands are related to read operations, they must be
- * listed in the same order than the corresponding write ones, since we
- * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
- * in goldfish_pipe_read_write() below.
- */
 #define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
 #define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
                                   * is possible */
 #define PIPE_WAKE_READ         (1 << 1)  /* pipe can now be read from */
 #define PIPE_WAKE_WRITE        (1 << 2)  /* pipe can now be written to */
 
-struct access_params {
-       unsigned long channel;
-       u32 size;
-       unsigned long address;
-       u32 cmd;
-       u32 result;
-       /* reserved for future extension */
-       u32 flags;
-};
+#define MAX_PAGES_TO_GRAB 32
 
-/* The global driver data. Holds a reference to the i/o page used to
- * communicate with the emulator, and a wake queue for blocked tasks
- * waiting to be awoken.
- */
-struct goldfish_pipe_dev {
-       spinlock_t lock;
-       unsigned char __iomem *base;
-       struct access_params *aps;
-       int irq;
-};
+#define DEBUG 0
 
-static struct goldfish_pipe_dev   pipe_dev[1];
+#if DEBUG
+#define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); }
+#else
+#define DPRINT(...)
+#endif
 
 /* This data type models a given pipe instance */
 struct goldfish_pipe {
@@ -142,6 +87,15 @@ struct goldfish_pipe {
        wait_queue_head_t wake_queue;
 };
 
+struct access_params {
+       unsigned long channel;
+       u32 size;
+       unsigned long address;
+       u32 cmd;
+       u32 result;
+       /* reserved for future extension */
+       u32 flags;
+};
 
 /* Bit flags for the 'flags' field */
 enum {
@@ -232,8 +186,10 @@ static int setup_access_params_addr(struct platform_device *pdev,
        if (valid_batchbuffer_addr(dev, aps)) {
                dev->aps = aps;
                return 0;
-       } else
+       } else {
+               devm_kfree(&pdev->dev, aps);
                return -1;
+       }
 }
 
 /* A value that will not be set by qemu emulator */
@@ -263,19 +219,15 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
        return 0;
 }
 
-/* This function is used for both reading from and writing to a given
- * pipe.
- */
 static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                                    size_t bufflen, int is_write)
 {
        unsigned long irq_flags;
        struct goldfish_pipe *pipe = filp->private_data;
        struct goldfish_pipe_dev *dev = pipe->dev;
-       const int cmd_offset = is_write ? 0
-                                       : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
        unsigned long address, address_end;
-       int ret = 0;
+       struct page* pages[MAX_PAGES_TO_GRAB] = {};
+       int count = 0, ret = -EINVAL;
 
        /* If the emulator already closed the pipe, no need to go further */
        if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -298,79 +250,127 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
        address_end = address + bufflen;
 
        while (address < address_end) {
-               unsigned long  page_end = (address & PAGE_MASK) + PAGE_SIZE;
-               unsigned long  next     = page_end < address_end ? page_end
-                                                                : address_end;
-               unsigned long  avail    = next - address;
-               int status, wakeBit;
-
-               /* Ensure that the corresponding page is properly mapped */
-               /* FIXME: this isn't safe or sufficient - use get_user_pages */
-               if (is_write) {
-                       char c;
-                       /* Ensure that the page is mapped and readable */
-                       if (__get_user(c, (char __user *)address)) {
-                               if (!ret)
-                                       ret = -EFAULT;
-                               break;
-                       }
-               } else {
-                       /* Ensure that the page is mapped and writable */
-                       if (__put_user(0, (char __user *)address)) {
-                               if (!ret)
-                                       ret = -EFAULT;
+               unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+               unsigned long next, avail;
+               int status, wakeBit, page_i, num_contiguous_pages;
+               long first_page, last_page, requested_pages;
+               unsigned long xaddr, xaddr_prev, xaddr_i;
+
+               /*
+                * Attempt to grab multiple physically contiguous pages.
+                */
+               first_page = address & PAGE_MASK;
+               last_page = (address_end - 1) & PAGE_MASK;
+               requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+               if (requested_pages > MAX_PAGES_TO_GRAB) {
+                       requested_pages = MAX_PAGES_TO_GRAB;
+               }
+               ret = get_user_pages_fast(first_page, requested_pages,
+                               !is_write, pages);
+
+               DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__,
+                       ret, requested_pages, first_page);
+               if (ret == 0) {
+                       DPRINT("%s: error: (requested pages == 0) (wanted %d)\n",
+                                       __FUNCTION__, requested_pages);
+                       mutex_unlock(&pipe->lock);
+                       return ret;
+               }
+               if (ret < 0) {
+                       DPRINT("%s: (requested pages < 0) %d \n",
+                                       __FUNCTION__, requested_pages);
+                       mutex_unlock(&pipe->lock);
+                       return ret;
+               }
+
+               xaddr = page_to_phys(pages[0]) | (address & ~PAGE_MASK);
+               xaddr_prev = xaddr;
+               num_contiguous_pages = ret == 0 ? 0 : 1;
+               for (page_i = 1; page_i < ret; page_i++) {
+                       xaddr_i = page_to_phys(pages[page_i]) | (address & ~PAGE_MASK);
+                       if (xaddr_i == xaddr_prev + PAGE_SIZE) {
+                               page_end += PAGE_SIZE;
+                               xaddr_prev = xaddr_i;
+                               num_contiguous_pages++;
+                       } else {
+                               DPRINT("%s: discontinuous page boundary: %d pages instead\n",
+                                               __FUNCTION__, page_i);
                                break;
                        }
                }
+               next = page_end < address_end ? page_end : address_end;
+               avail = next - address;
 
                /* Now, try to transfer the bytes in the current page */
                spin_lock_irqsave(&dev->lock, irq_flags);
-               if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
-                               address, avail, pipe, &status)) {
+               if (access_with_param(dev,
+                                       is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+                                       xaddr, avail, pipe, &status)) {
                        gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
                                     dev->base + PIPE_REG_CHANNEL_HIGH);
                        writel(avail, dev->base + PIPE_REG_SIZE);
-                       gf_write_ptr((void *)address,
+                       gf_write_ptr((void *)xaddr,
                                     dev->base + PIPE_REG_ADDRESS,
                                     dev->base + PIPE_REG_ADDRESS_HIGH);
-                       writel(CMD_WRITE_BUFFER + cmd_offset,
-                                       dev->base + PIPE_REG_COMMAND);
+                       writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+                              dev->base + PIPE_REG_COMMAND);
                        status = readl(dev->base + PIPE_REG_STATUS);
                }
                spin_unlock_irqrestore(&dev->lock, irq_flags);
 
+               for (page_i = 0; page_i < ret; page_i++) {
+                       if (status > 0 && !is_write &&
+                               page_i < num_contiguous_pages) {
+                               set_page_dirty(pages[page_i]);
+                       }
+                       put_page(pages[page_i]);
+               }
+
                if (status > 0) { /* Correct transfer */
-                       ret += status;
+                       count += status;
                        address += status;
                        continue;
-               }
-
-               if (status == 0)  /* EOF */
+               } else if (status == 0) { /* EOF */
+                       ret = 0;
                        break;
-
-               /* An error occured. If we already transfered stuff, just
-               * return with its count. We expect the next call to return
-               * an error code */
-               if (ret > 0)
+               } else if (status < 0 && count > 0) {
+                       /*
+                        * An error occured and we already transfered
+                        * An error occurred and we already transferred
+                        * something on one of the previous pages.
+                        * Just return what we already copied and log the
+                        * error.
+                        * Note: This seems like an incorrect approach but
+                        * cannot change it until we check if any user space
+                        * ABI relies on this behavior.
+                        */
+                       if (status != PIPE_ERROR_AGAIN)
+                               pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+                                               status, is_write ? "write" : "read");
+                       ret = 0;
                        break;
+               }
 
-               /* If the error is not PIPE_ERROR_AGAIN, or if we are not in
-               * non-blocking mode, just return the error code.
-               */
+               /*
+                * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+                * non-blocking mode, just return the error code.
+                */
                if (status != PIPE_ERROR_AGAIN ||
-                       (filp->f_flags & O_NONBLOCK) != 0) {
+                               (filp->f_flags & O_NONBLOCK) != 0) {
                        ret = goldfish_pipe_error_convert(status);
                        break;
                }
 
-               /* We will have to wait until more data/space is available.
-               * First, mark the pipe as waiting for a specific wake signal.
-               */
+               /*
+                * The backend blocked the read/write, wait until the backend
+                * tells us it's ready to process more data.
+                */
                wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
                set_bit(wakeBit, &pipe->flags);
 
                /* Tell the emulator we're going to wait for a wake event */
-               goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+               goldfish_cmd(pipe,
+                               is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
 
                /* Unlock the pipe, then wait for the wake signal */
                mutex_unlock(&pipe->lock);
@@ -388,12 +388,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
                /* Try to re-acquire the lock */
                if (mutex_lock_interruptible(&pipe->lock))
                        return -ERESTARTSYS;
-
-               /* Try the transfer again */
-               continue;
        }
        mutex_unlock(&pipe->lock);
-       return ret;
+
+       if (ret < 0)
+               return ret;
+       else
+               return count;
 }
 
 static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -446,10 +447,11 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
        unsigned long irq_flags;
        int count = 0;
 
-       /* We're going to read from the emulator a list of (channel,flags)
-       * pairs corresponding to the wake events that occured on each
-       * blocked pipe (i.e. channel).
-       */
+       /*
+        * We're going to read from the emulator a list of (channel,flags)
+        * pairs corresponding to the wake events that occured on each
+        * blocked pipe (i.e. channel).
+        */
        spin_lock_irqsave(&dev->lock, irq_flags);
        for (;;) {
                /* First read the channel, 0 means the end of the list */
@@ -514,6 +516,8 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
 
        pipe->dev = dev;
        mutex_init(&pipe->lock);
+       DPRINT("%s: call. pipe_dev=%p new_pipe=%p file=%p\n",
+              __func__, pipe_dev, pipe, file);
+       /* Initialize the wait queue used to sleep until the host signals a wake event */
        init_waitqueue_head(&pipe->wake_queue);
 
        /*
@@ -536,6 +540,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
 {
        struct goldfish_pipe *pipe = filp->private_data;
 
+       DPRINT("%s: call. pipe=%p file=%p\n", __func__, pipe, filp);
        /* The guest is closing the channel, so tell the emulator right now */
        goldfish_cmd(pipe, CMD_CLOSE);
        kfree(pipe);
@@ -552,77 +557,33 @@ static const struct file_operations goldfish_pipe_fops = {
        .release = goldfish_pipe_release,
 };
 
-static struct miscdevice goldfish_pipe_device = {
+static struct miscdevice goldfish_pipe_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "goldfish_pipe",
        .fops = &goldfish_pipe_fops,
 };
 
-static int goldfish_pipe_probe(struct platform_device *pdev)
+int goldfish_pipe_device_init_v1(struct platform_device *pdev)
 {
-       int err;
-       struct resource *r;
        struct goldfish_pipe_dev *dev = pipe_dev;
-
-       /* not thread safe, but this should not happen */
-       WARN_ON(dev->base != NULL);
-
-       spin_lock_init(&dev->lock);
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL || resource_size(r) < PAGE_SIZE) {
-               dev_err(&pdev->dev, "can't allocate i/o page\n");
-               return -EINVAL;
-       }
-       dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
-       if (dev->base == NULL) {
-               dev_err(&pdev->dev, "ioremap failed\n");
-               return -EINVAL;
-       }
-
-       r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (r == NULL) {
-               err = -EINVAL;
-               goto error;
-       }
-       dev->irq = r->start;
-
-       err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
+       int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
                                IRQF_SHARED, "goldfish_pipe", dev);
        if (err) {
-               dev_err(&pdev->dev, "unable to allocate IRQ\n");
-               goto error;
+               dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
+               return err;
        }
 
-       err = misc_register(&goldfish_pipe_device);
+       err = misc_register(&goldfish_pipe_dev);
        if (err) {
-               dev_err(&pdev->dev, "unable to register device\n");
-               goto error;
+               dev_err(&pdev->dev, "unable to register v1 device\n");
+               return err;
        }
+
        setup_access_params_addr(pdev, dev);
        return 0;
-
-error:
-       dev->base = NULL;
-       return err;
 }
 
-static int goldfish_pipe_remove(struct platform_device *pdev)
+void goldfish_pipe_device_deinit_v1(struct platform_device *pdev)
 {
-       struct goldfish_pipe_dev *dev = pipe_dev;
-       misc_deregister(&goldfish_pipe_device);
-       dev->base = NULL;
-       return 0;
+       misc_deregister(&goldfish_pipe_dev);
 }
-
-static struct platform_driver goldfish_pipe = {
-       .probe = goldfish_pipe_probe,
-       .remove = goldfish_pipe_remove,
-       .driver = {
-               .name = "goldfish_pipe"
-       }
-};
-
-module_platform_driver(goldfish_pipe);
-MODULE_AUTHOR("David Turner <digit@google.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/goldfish/goldfish_pipe.h b/drivers/platform/goldfish/goldfish_pipe.h
new file mode 100644 (file)
index 0000000..9b75a51
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef GOLDFISH_PIPE_H
+#define GOLDFISH_PIPE_H
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/goldfish.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
+
+
+/* Initialize the legacy version of the pipe device driver */
+int goldfish_pipe_device_init_v1(struct platform_device *pdev);
+
+/* Deinitialize the legacy version of the pipe device driver */
+void goldfish_pipe_device_deinit_v1(struct platform_device *pdev);
+
+/* Forward declarations for the device struct */
+struct goldfish_pipe;
+struct goldfish_pipe_dev_buffers;
+
+/* The global driver data. Holds a reference to the i/o page used to
+ * communicate with the emulator, and a wake queue for blocked tasks
+ * waiting to be awoken.
+ */
+struct goldfish_pipe_dev {
+       /*
+        * Global device spinlock. Protects the following members:
+        *  - pipes, pipes_capacity
+        *  - [*pipes, *pipes + pipes_capacity) - array data
+        *  - first_signalled_pipe,
+        *      goldfish_pipe::prev_signalled,
+        *      goldfish_pipe::next_signalled,
+        *      goldfish_pipe::signalled_flags - all signalled-related fields,
+        *                                       in all allocated pipes
+        *  - open_command_params - PIPE_CMD_OPEN-related buffers
+        *
+        * It looks like a lot of different fields, but the trick is that the only
+        * operation that happens often is the signalled pipes array manipulation.
+        * That's why it's OK for now to keep the rest of the fields under the same
+        * lock. If we notice too much contention because of PIPE_CMD_OPEN,
+        * then we should add a separate lock there.
+        */
+       spinlock_t lock;
+
+       /*
+        * Array of the pipes of |pipes_capacity| elements,
+        * indexed by goldfish_pipe::id
+        */
+       struct goldfish_pipe **pipes;
+       u32 pipes_capacity;
+
+       /* Pointers to the buffers the host uses for interaction with this driver */
+       struct goldfish_pipe_dev_buffers *buffers;
+
+       /* Head of a doubly linked list of signalled pipes */
+       struct goldfish_pipe *first_signalled_pipe;
+
+       /* Some device-specific data */
+       int irq;
+       int version;
+       unsigned char __iomem *base;
+
+       /* v1-specific access parameters */
+       struct access_params *aps;
+};
+
+extern struct goldfish_pipe_dev pipe_dev[1];
+
+#endif /* GOLDFISH_PIPE_H */
diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c
new file mode 100644 (file)
index 0000000..ad373ed
--- /dev/null
@@ -0,0 +1,889 @@
+/*
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
+ * Copyright (C) 2011-2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This source file contains the implementation of a special device driver
+ * that intends to provide a *very* fast communication channel between the
+ * guest system and the QEMU emulator.
+ *
+ * Usage from the guest is simply the following (error handling simplified):
+ *
+ *    int  fd = open("/dev/qemu_pipe",O_RDWR);
+ *    .... write() or read() through the pipe.
+ *
+ * This driver doesn't deal with the exact protocol used during the session.
+ * It is intended to be as simple as something like:
+ *
+ *    // do this _just_ after opening the fd to connect to a specific
+ *    // emulator service.
+ *    const char*  msg = "<pipename>";
+ *    if (write(fd, msg, strlen(msg)+1) < 0) {
+ *       ... could not connect to <pipename> service
+ *       close(fd);
+ *    }
+ *
+ *    // after this, simply read() and write() to communicate with the
+ *    // service. Exact protocol details left as an exercise to the reader.
+ *
+ * This driver is very fast because it doesn't copy any data through
+ * intermediate buffers, since the emulator is capable of translating
+ * guest user addresses into host ones.
+ *
+ * Note that we must however ensure that each user page involved in the
+ * exchange is properly mapped during a transfer.
+ */
+
+#include "goldfish_pipe.h"
+
+
+/*
+ * Update this when something changes in the driver's behavior so the host
+ * can benefit from knowing it
+ */
+enum {
+       PIPE_DRIVER_VERSION = 2,
+       PIPE_CURRENT_DEVICE_VERSION = 2
+};
+
+/*
+ * IMPORTANT: The following constants must match the ones used and defined
+ * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
+ */
+
+/* List of bitflags returned in status of CMD_POLL command */
+enum PipePollFlags {
+       PIPE_POLL_IN    = 1 << 0,
+       PIPE_POLL_OUT   = 1 << 1,
+       PIPE_POLL_HUP   = 1 << 2
+};
+
+/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
+enum PipeErrors {
+       PIPE_ERROR_INVAL  = -1,
+       PIPE_ERROR_AGAIN  = -2,
+       PIPE_ERROR_NOMEM  = -3,
+       PIPE_ERROR_IO     = -4
+};
+
+/* Bit-flags used to signal events from the emulator */
+enum PipeWakeFlags {
+       PIPE_WAKE_CLOSED = 1 << 0,  /* emulator closed pipe */
+       PIPE_WAKE_READ   = 1 << 1,  /* pipe can now be read from */
+       PIPE_WAKE_WRITE  = 1 << 2  /* pipe can now be written to */
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+       BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
+       BIT_WAKE_ON_WRITE  = 1,  /* want to be woken on writes */
+       BIT_WAKE_ON_READ   = 2,  /* want to be woken on reads */
+};
+
+enum PipeRegs {
+       PIPE_REG_CMD = 0,
+
+       PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
+       PIPE_REG_SIGNAL_BUFFER = 8,
+       PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
+
+       PIPE_REG_OPEN_BUFFER_HIGH = 20,
+       PIPE_REG_OPEN_BUFFER = 24,
+
+       PIPE_REG_VERSION = 36,
+
+       PIPE_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+       PIPE_CMD_OPEN = 1,      /* to be used by the pipe device itself */
+       PIPE_CMD_CLOSE,
+       PIPE_CMD_POLL,
+       PIPE_CMD_WRITE,
+       PIPE_CMD_WAKE_ON_WRITE,
+       PIPE_CMD_READ,
+       PIPE_CMD_WAKE_ON_READ,
+
+       /*
+        * TODO(zyy): implement a deferred read/write execution to allow parallel
+        *  processing of pipe operations on the host.
+       */
+       PIPE_CMD_WAKE_ON_DONE_IO,
+};
+
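+/*
+ * MAX_BUFFERS_PER_COMMAND is sized so that struct goldfish_pipe_command,
+ * including its ptrs[] and sizes[] arrays, still fits into a single page;
+ * goldfish_pipe_probe() checks this with a BUG_ON.
+ */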
+enum {
+       MAX_BUFFERS_PER_COMMAND = 336,
+       MAX_SIGNALLED_PIPES = 64,
+       INITIAL_PIPES_CAPACITY = 64
+};
+
+struct goldfish_pipe_dev;
+struct goldfish_pipe;
+struct goldfish_pipe_command;
+
+/* A per-pipe command structure, shared with the host */
+struct goldfish_pipe_command {
+       s32 cmd;                /* PipeCmdCode, guest -> host */
+       s32 id;                 /* pipe id, guest -> host */
+       s32 status;             /* command execution status, host -> guest */
+       s32 reserved;   /* to pad to 64-bit boundary */
+       union {
+               /* Parameters for PIPE_CMD_{READ,WRITE} */
+               struct {
+                       u32 buffers_count;                                      /* number of buffers, guest -> host */
+                       s32 consumed_size;                                      /* number of consumed bytes, host -> guest */
+                       u64 ptrs[MAX_BUFFERS_PER_COMMAND];      /* buffer pointers, guest -> host */
+                       u32 sizes[MAX_BUFFERS_PER_COMMAND];     /* buffer sizes, guest -> host */
+               } rw_params;
+       };
+};
+
+/* A single signalled pipe information */
+struct signalled_pipe_buffer {
+       u32 id;
+       u32 flags;
+};
+
+/* Parameters for the PIPE_CMD_OPEN command */
+struct open_command_param {
+       u64 command_buffer_ptr;
+       u32 rw_params_max_count;
+};
+
+/* Device-level set of buffers shared with the host */
+struct goldfish_pipe_dev_buffers {
+       struct open_command_param open_command_params;
+       struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
+};
+
+/* This data type models a given pipe instance */
+struct goldfish_pipe {
+       u32 id;                                                 /* pipe ID - index into goldfish_pipe_dev::pipes array */
+       unsigned long flags;                    /* The wake flags the pipe is waiting for
+                                                                        * Note: not protected with any lock, uses atomic operations
+                                                                        *  and barriers to make it thread-safe.
+                                                                        */
+       unsigned long signalled_flags;  /* wake flags the host has signalled,
+                                                                        *  - protected by goldfish_pipe_dev::lock */
+
+       struct goldfish_pipe_command *command_buffer;   /* A pointer to command buffer */
+
+       /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */
+       struct goldfish_pipe *prev_signalled;
+       struct goldfish_pipe *next_signalled;
+
+       /*
+        * A pipe's own lock. Protects the following:
+        *  - *command_buffer - makes sure a command can safely write its parameters
+        *    to the host and read the results back.
+        */
+       struct mutex lock;
+
+       wait_queue_head_t wake_queue;   /* A wake queue for sleeping until host signals an event */
+       struct goldfish_pipe_dev *dev;  /* Pointer to the parent goldfish_pipe_dev instance */
+};
+
+struct goldfish_pipe_dev pipe_dev[1] = {};
+
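+/*
+ * Writing the pipe id to PIPE_REG_CMD makes the host execute the command
+ * placed in the pipe's command buffer, so the status field is expected to
+ * be valid as soon as the writel() returns.
+ */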
+static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+{
+       pipe->command_buffer->cmd = cmd;
+       pipe->command_buffer->status = PIPE_ERROR_INVAL;        /* failure by default */
+       writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
+       return pipe->command_buffer->status;
+}
+
+static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+{
+       int status;
+       if (mutex_lock_interruptible(&pipe->lock))
+               return PIPE_ERROR_IO;
+       status = goldfish_cmd_locked(pipe, cmd);
+       mutex_unlock(&pipe->lock);
+       return status;
+}
+
+/*
+ * This function converts an error code returned by the emulator through
+ * the PIPE_REG_STATUS i/o register into a valid negative errno value.
+ */
+static int goldfish_pipe_error_convert(int status)
+{
+       switch (status) {
+       case PIPE_ERROR_AGAIN:
+               return -EAGAIN;
+       case PIPE_ERROR_NOMEM:
+               return -ENOMEM;
+       case PIPE_ERROR_IO:
+               return -EIO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int pin_user_pages(unsigned long first_page, unsigned long last_page,
+       unsigned last_page_size, int is_write,
+       struct page *pages[MAX_BUFFERS_PER_COMMAND], unsigned *iter_last_page_size)
+{
+       int ret;
+       int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+       if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
+               requested_pages = MAX_BUFFERS_PER_COMMAND;
+               *iter_last_page_size = PAGE_SIZE;
+       } else {
+               *iter_last_page_size = last_page_size;
+       }
+
+       ret = get_user_pages_fast(
+                       first_page, requested_pages, !is_write, pages);
+       if (ret <= 0)
+               return -EFAULT;
+       if (ret < requested_pages)
+               *iter_last_page_size = PAGE_SIZE;
+       return ret;
+}
+
+static void release_user_pages(struct page **pages, int pages_count,
+       int is_write, s32 consumed_size)
+{
+       int i;
+       for (i = 0; i < pages_count; i++) {
+               if (!is_write && consumed_size > 0) {
+                       set_page_dirty(pages[i]);
+               }
+               put_page(pages[i]);
+       }
+}
+
+/* Populate the call parameters, merging adjacent pages together */
+static void populate_rw_params(
+       struct page **pages, int pages_count,
+       unsigned long address, unsigned long address_end,
+       unsigned long first_page, unsigned long last_page,
+       unsigned iter_last_page_size, int is_write,
+       struct goldfish_pipe_command *command)
+{
+       /*
+        * Process the first page separately - it's the only page that
+        * needs special handling for its start address.
+        */
+       unsigned long xaddr = page_to_phys(pages[0]);
+       unsigned long xaddr_prev = xaddr;
+       int buffer_idx = 0;
+       int i = 1;
+       int size_on_page = first_page == last_page
+                       ? (int)(address_end - address)
+                       : (PAGE_SIZE - (address & ~PAGE_MASK));
+       command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
+       command->rw_params.sizes[0] = size_on_page;
+       for (; i < pages_count; ++i) {
+               xaddr = page_to_phys(pages[i]);
+               size_on_page = (i == pages_count - 1) ? iter_last_page_size : PAGE_SIZE;
+               if (xaddr == xaddr_prev + PAGE_SIZE) {
+                       command->rw_params.sizes[buffer_idx] += size_on_page;
+               } else {
+                       ++buffer_idx;
+                       command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
+                       command->rw_params.sizes[buffer_idx] = size_on_page;
+               }
+               xaddr_prev = xaddr;
+       }
+       command->rw_params.buffers_count = buffer_idx + 1;
+}
+
+static int transfer_max_buffers(struct goldfish_pipe* pipe,
+       unsigned long address, unsigned long address_end, int is_write,
+       unsigned long last_page, unsigned int last_page_size,
+       s32* consumed_size, int* status)
+{
+       struct page *pages[MAX_BUFFERS_PER_COMMAND];
+       unsigned long first_page = address & PAGE_MASK;
+       unsigned int iter_last_page_size;
+       int pages_count = pin_user_pages(first_page, last_page,
+                       last_page_size, is_write,
+                       pages, &iter_last_page_size);
+       if (pages_count < 0)
+               return pages_count;
+
+       /* Serialize access to the pipe command buffers */
+       if (mutex_lock_interruptible(&pipe->lock))
+               return -ERESTARTSYS;
+
+       populate_rw_params(pages, pages_count, address, address_end,
+               first_page, last_page, iter_last_page_size, is_write,
+               pipe->command_buffer);
+
+       /* Transfer the data */
+       *status = goldfish_cmd_locked(pipe,
+                                               is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
+
+       *consumed_size = pipe->command_buffer->rw_params.consumed_size;
+
+       mutex_unlock(&pipe->lock);
+
+       release_user_pages(pages, pages_count, is_write, *consumed_size);
+
+       return 0;
+}
+
+static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
+{
+       u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+       set_bit(wakeBit, &pipe->flags);
+
+       /* Tell the emulator we're going to wait for a wake event */
+       (void)goldfish_cmd(pipe,
+                       is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
+
+       while (test_bit(wakeBit, &pipe->flags)) {
+               if (wait_event_interruptible(
+                               pipe->wake_queue,
+                               !test_bit(wakeBit, &pipe->flags)))
+                       return -ERESTARTSYS;
+
+               if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+                       return -EIO;
+       }
+
+       return 0;
+}
+
+static ssize_t goldfish_pipe_read_write(struct file *filp,
+       char __user *buffer, size_t bufflen, int is_write)
+{
+       struct goldfish_pipe *pipe = filp->private_data;
+       int count = 0, ret = -EINVAL;
+       unsigned long address, address_end, last_page;
+       unsigned int last_page_size;
+
+       /* If the emulator already closed the pipe, no need to go further */
+       if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
+               return -EIO;
+       /* Null reads and writes succeed */
+       if (unlikely(bufflen == 0))
+               return 0;
+       /* Check the buffer range for access */
+       if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
+                       buffer, bufflen)))
+               return -EFAULT;
+
+       address = (unsigned long)buffer;
+       address_end = address + bufflen;
+       last_page = (address_end - 1) & PAGE_MASK;
+       last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
+
+       while (address < address_end) {
+               s32 consumed_size;
+               int status;
+               ret = transfer_max_buffers(pipe, address, address_end, is_write,
+                               last_page, last_page_size, &consumed_size, &status);
+               if (ret < 0)
+                       break;
+
+               if (consumed_size > 0) {
+                       /* No matter what the status is, we've transferred something */
+                       count += consumed_size;
+                       address += consumed_size;
+               }
+               if (status > 0)
+                       continue;
+               if (status == 0) {
+                       /* EOF */
+                       ret = 0;
+                       break;
+               }
+               if (count > 0) {
+                       /*
+                        * An error occurred, but we already transferred
+                        * something on one of the previous iterations.
+                        * Just return what we already copied and log this
+                        * error.
+                        */
+                       if (status != PIPE_ERROR_AGAIN)
+                               pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
+                                                                       status, is_write ? "write" : "read");
+                       break;
+               }
+
+               /*
+                * If the error is not PIPE_ERROR_AGAIN, or if we are in
+                * non-blocking mode, just return the error code.
+                */
+               if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) {
+                       ret = goldfish_pipe_error_convert(status);
+                       break;
+               }
+
+               status = wait_for_host_signal(pipe, is_write);
+               if (status < 0)
+                       return status;
+       }
+
+       if (count > 0)
+               return count;
+       return ret;
+}
+
+static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
+                               size_t bufflen, loff_t *ppos)
+{
+       return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0);
+}
+
+static ssize_t goldfish_pipe_write(struct file *filp,
+                               const char __user *buffer, size_t bufflen,
+                               loff_t *ppos)
+{
+       return goldfish_pipe_read_write(filp,
+                       /* cast away the const */(char __user *)buffer, bufflen,
+                       /* is_write */ 1);
+}
+
+static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+{
+       struct goldfish_pipe *pipe = filp->private_data;
+       unsigned int mask = 0;
+       int status;
+
+       poll_wait(filp, &pipe->wake_queue, wait);
+
+       status = goldfish_cmd(pipe, PIPE_CMD_POLL);
+       if (status < 0) {
+               return -ERESTARTSYS;
+       }
+
+       if (status & PIPE_POLL_IN)
+               mask |= POLLIN | POLLRDNORM;
+       if (status & PIPE_POLL_OUT)
+               mask |= POLLOUT | POLLWRNORM;
+       if (status & PIPE_POLL_HUP)
+               mask |= POLLHUP;
+       if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+               mask |= POLLERR;
+
+       return mask;
+}
+
+static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+       u32 id, u32 flags)
+{
+       struct goldfish_pipe *pipe;
+
+       BUG_ON(id >= dev->pipes_capacity);
+
+       pipe = dev->pipes[id];
+       if (!pipe)
+               return;
+       pipe->signalled_flags |= flags;
+
+       if (pipe->prev_signalled || pipe->next_signalled
+               || dev->first_signalled_pipe == pipe)
+               return; /* already in the list */
+       pipe->next_signalled = dev->first_signalled_pipe;
+       if (dev->first_signalled_pipe) {
+               dev->first_signalled_pipe->prev_signalled = pipe;
+       }
+       dev->first_signalled_pipe = pipe;
+}
+
+static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
+       struct goldfish_pipe *pipe)
+{
+       if (pipe->prev_signalled)
+               pipe->prev_signalled->next_signalled = pipe->next_signalled;
+       if (pipe->next_signalled)
+               pipe->next_signalled->prev_signalled = pipe->prev_signalled;
+       if (pipe == dev->first_signalled_pipe)
+               dev->first_signalled_pipe = pipe->next_signalled;
+       pipe->prev_signalled = NULL;
+       pipe->next_signalled = NULL;
+}
+
+static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev *dev,
+               int *wakes)
+{
+       struct goldfish_pipe *pipe;
+       unsigned long flags;
+       spin_lock_irqsave(&dev->lock, flags);
+
+       pipe = dev->first_signalled_pipe;
+       if (pipe) {
+               *wakes = pipe->signalled_flags;
+               pipe->signalled_flags = 0;
+               /*
+                * This is an optimized version of signalled_pipes_remove_locked();
+                * we want it to be as fast as possible so that sleeping pipe
+                * operations are woken up sooner.
+                */
+               dev->first_signalled_pipe = pipe->next_signalled;
+               if (dev->first_signalled_pipe)
+                       dev->first_signalled_pipe->prev_signalled = NULL;
+               pipe->next_signalled = NULL;
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return pipe;
+}
+
+static void goldfish_interrupt_task(unsigned long unused)
+{
+       struct goldfish_pipe_dev *dev = pipe_dev;
+       /* Iterate over the signalled pipes and wake them one by one */
+       struct goldfish_pipe *pipe;
+       int wakes;
+       while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
+               if (wakes & PIPE_WAKE_CLOSED) {
+                       pipe->flags = 1 << BIT_CLOSED_ON_HOST;
+               } else {
+                       if (wakes & PIPE_WAKE_READ)
+                               clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+                       if (wakes & PIPE_WAKE_WRITE)
+                               clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
+               }
+               /*
+                * wake_up_interruptible() implies a write barrier, so don't explicitly
+                * add another one here.
+                */
+               wake_up_interruptible(&pipe->wake_queue);
+       }
+}
+DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
+
+/*
+ * The general idea of the interrupt handling:
+ *
+ *  1. device raises an interrupt if there's at least one signalled pipe
+ *  2. IRQ handler reads the signalled pipes and their count from the device
+ *  3. device writes them into a shared buffer and returns the count
+ *      it only resets the IRQ if it has returned all signalled pipes,
+ *      otherwise it leaves it raised, so IRQ handler will be called
+ *      again for the next chunk
+ *  4. IRQ handler adds all returned pipes to the device's signalled pipes list
+ *  5. IRQ handler launches a tasklet to process the signalled pipes from the
+ *      list in a separate context
+ */
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+       u32 count;
+       u32 i;
+       unsigned long flags;
+       struct goldfish_pipe_dev *dev = dev_id;
+       if (dev != pipe_dev)
+               return IRQ_NONE;
+
+       /* Request the signalled pipes from the device */
+       spin_lock_irqsave(&dev->lock, flags);
+
+       count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
+       if (count == 0) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return IRQ_NONE;
+       }
+       if (count > MAX_SIGNALLED_PIPES)
+               count = MAX_SIGNALLED_PIPES;
+
+       for (i = 0; i < count; ++i)
+               signalled_pipes_add_locked(dev,
+                       dev->buffers->signalled_pipe_buffers[i].id,
+                       dev->buffers->signalled_pipe_buffers[i].flags);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       tasklet_schedule(&goldfish_interrupt_tasklet);
+       return IRQ_HANDLED;
+}
+
+static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
+{
+       int id;
+       for (id = 0; id < dev->pipes_capacity; ++id)
+               if (!dev->pipes[id])
+                       return id;
+
+       {
+               /* Reallocate the array */
+               u32 new_capacity = 2 * dev->pipes_capacity;
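+               /* GFP_ATOMIC: the caller holds dev->lock with interrupts disabled */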
+               struct goldfish_pipe **pipes =
+                               kcalloc(new_capacity, sizeof(*pipes),
+                                       GFP_ATOMIC);
+               if (!pipes)
+                       return -ENOMEM;
+               memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
+               kfree(dev->pipes);
+               dev->pipes = pipes;
+               id = dev->pipes_capacity;
+               dev->pipes_capacity = new_capacity;
+       }
+       return id;
+}
+
+/**
+ *     goldfish_pipe_open - open a channel to the AVD
+ *     @inode: inode of device
+ *     @file: file struct of opener
+ *
+ *     Create a new pipe link between the emulator and the user application.
+ *     Each new request produces a new pipe.
+ *
+ *     Note: we use the pipe ID as a mux. All goldfish emulations are 32-bit
+ *     right now, so this is fine. A move to 64-bit will need this addressing
+ *     revisited.
+ */
+static int goldfish_pipe_open(struct inode *inode, struct file *file)
+{
+       struct goldfish_pipe_dev *dev = pipe_dev;
+       unsigned long flags;
+       int id;
+       int status;
+
+       /* Allocate new pipe kernel object */
+       struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+       if (pipe == NULL)
+               return -ENOMEM;
+
+       pipe->dev = dev;
+       mutex_init(&pipe->lock);
+       init_waitqueue_head(&pipe->wake_queue);
+
+       /*
+        * Command buffer needs to be allocated on its own page to make sure it is
+        * physically contiguous in host's address space.
+        */
+       pipe->command_buffer =
+                       (struct goldfish_pipe_command*)__get_free_page(GFP_KERNEL);
+       if (!pipe->command_buffer) {
+               status = -ENOMEM;
+               goto err_pipe;
+       }
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       id = get_free_pipe_id_locked(dev);
+       if (id < 0) {
+               status = id;
+               goto err_id_locked;
+       }
+
+       dev->pipes[id] = pipe;
+       pipe->id = id;
+       pipe->command_buffer->id = id;
+
+       /* Now tell the emulator we're opening a new pipe. */
+       dev->buffers->open_command_params.rw_params_max_count =
+                       MAX_BUFFERS_PER_COMMAND;
+       dev->buffers->open_command_params.command_buffer_ptr =
+                       (u64)(unsigned long)__pa(pipe->command_buffer);
+       status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+       spin_unlock_irqrestore(&dev->lock, flags);
+       if (status < 0)
+               goto err_cmd;
+       /* All is done, save the pipe into the file's private data field */
+       file->private_data = pipe;
+       return 0;
+
+err_cmd:
+       spin_lock_irqsave(&dev->lock, flags);
+       dev->pipes[id] = NULL;
+err_id_locked:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       free_page((unsigned long)pipe->command_buffer);
+err_pipe:
+       kfree(pipe);
+       return status;
+}
+
+static int goldfish_pipe_release(struct inode *inode, struct file *filp)
+{
+       unsigned long flags;
+       struct goldfish_pipe *pipe = filp->private_data;
+       struct goldfish_pipe_dev *dev = pipe->dev;
+
+       /* The guest is closing the channel, so tell the emulator right now */
+       (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+
+       spin_lock_irqsave(&dev->lock, flags);
+       dev->pipes[pipe->id] = NULL;
+       signalled_pipes_remove_locked(dev, pipe);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       filp->private_data = NULL;
+       free_page((unsigned long)pipe->command_buffer);
+       kfree(pipe);
+       return 0;
+}
+
+static const struct file_operations goldfish_pipe_fops = {
+       .owner = THIS_MODULE,
+       .read = goldfish_pipe_read,
+       .write = goldfish_pipe_write,
+       .poll = goldfish_pipe_poll,
+       .open = goldfish_pipe_open,
+       .release = goldfish_pipe_release,
+};
+
+static struct miscdevice goldfish_pipe_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "goldfish_pipe",
+       .fops = &goldfish_pipe_fops,
+};
+
+static int goldfish_pipe_device_init_v2(struct platform_device *pdev)
+{
+       char *page;
+       struct goldfish_pipe_dev *dev = pipe_dev;
+       int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
+                               IRQF_SHARED, "goldfish_pipe", dev);
+       if (err) {
+               dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
+               return err;
+       }
+
+       err = misc_register(&goldfish_pipe_dev);
+       if (err) {
+               dev_err(&pdev->dev, "unable to register v2 device\n");
+               return err;
+       }
+
+       dev->first_signalled_pipe = NULL;
+       dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
+       dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), GFP_KERNEL);
+       if (!dev->pipes)
+               return -ENOMEM;
+
+       /*
+        * We're going to pass two buffers, open_command_params and
+        * signalled_pipe_buffers, to the host. This means each of those buffers
+        * needs to be contained in a single physical page. The easiest choice is
+        * to just allocate a page and place the buffers in it.
+        */
+       BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE);
+       page = (char*)__get_free_page(GFP_KERNEL);
+       if (!page) {
+               kfree(dev->pipes);
+               return -ENOMEM;
+       }
+       dev->buffers = (struct goldfish_pipe_dev_buffers*)page;
+
+       /* Send the buffer addresses to the host */
+       {
+               u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
+               writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+               writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_SIGNAL_BUFFER);
+               writel((u32)MAX_SIGNALLED_PIPES, dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+               paddr = __pa(&dev->buffers->open_command_params);
+               writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
+               writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_OPEN_BUFFER);
+       }
+       return 0;
+}
+
+static void goldfish_pipe_device_deinit_v2(struct platform_device *pdev)
+{
+       struct goldfish_pipe_dev *dev = pipe_dev;
+       misc_deregister(&goldfish_pipe_dev);
+       kfree(dev->pipes);
+       free_page((unsigned long)dev->buffers);
+}
+
+static int goldfish_pipe_probe(struct platform_device *pdev)
+{
+       int err;
+       struct resource *r;
+       struct goldfish_pipe_dev *dev = pipe_dev;
+
+       BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
+
+       /* not thread safe, but this should not happen */
+       WARN_ON(dev->base != NULL);
+
+       spin_lock_init(&dev->lock);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (r == NULL || resource_size(r) < PAGE_SIZE) {
+               dev_err(&pdev->dev, "can't allocate i/o page\n");
+               return -EINVAL;
+       }
+       dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+       if (dev->base == NULL) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               return -EINVAL;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (r == NULL) {
+               err = -EINVAL;
+               goto error;
+       }
+       dev->irq = r->start;
+
+       /*
+        * Exchange the versions with the host device
+        *
+        * Note: the v1 driver did not report its version, so we write ours
+        * before reading the device version back; this lets the host
+        * implementation detect an old driver (no version write before read).
+        */
+       writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
+       dev->version = readl(dev->base + PIPE_REG_VERSION);
+       if (dev->version < PIPE_CURRENT_DEVICE_VERSION) {
+               /* initialize the old device version */
+               err = goldfish_pipe_device_init_v1(pdev);
+       } else {
+               /* Host device supports the new interface */
+               err = goldfish_pipe_device_init_v2(pdev);
+       }
+       if (!err)
+               return 0;
+
+error:
+       dev->base = NULL;
+       return err;
+}
+
+static int goldfish_pipe_remove(struct platform_device *pdev)
+{
+       struct goldfish_pipe_dev *dev = pipe_dev;
+       if (dev->version < PIPE_CURRENT_DEVICE_VERSION)
+               goldfish_pipe_device_deinit_v1(pdev);
+       else
+               goldfish_pipe_device_deinit_v2(pdev);
+       dev->base = NULL;
+       return 0;
+}
+
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+       { "GFSH0003", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+       { .compatible = "google,android-pipe", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
+static struct platform_driver goldfish_pipe_driver = {
+       .probe = goldfish_pipe_probe,
+       .remove = goldfish_pipe_remove,
+       .driver = {
+               .name = "goldfish_pipe",
+               .of_match_table = goldfish_pipe_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
+       }
+};
+
+module_platform_driver(goldfish_pipe_driver);
+MODULE_AUTHOR("David Turner <digit@google.com>");
+MODULE_LICENSE("GPL");
index a50bb988c69a0bbd49cbf455ee3304c066599bbb..f5c525e4482a18b533ad80c09b1e8a7da8be06a8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/acpi.h>
 
 struct goldfish_battery_data {
        void __iomem *reg_base;
@@ -227,11 +228,25 @@ static int goldfish_battery_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_battery_of_match[] = {
+       { .compatible = "google,goldfish-battery", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_battery_of_match);
+
+static const struct acpi_device_id goldfish_battery_acpi_match[] = {
+       { "GFSH0001", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
+
 static struct platform_driver goldfish_battery_device = {
        .probe          = goldfish_battery_probe,
        .remove         = goldfish_battery_remove,
        .driver = {
-               .name = "goldfish-battery"
+               .name = "goldfish-battery",
+               .of_match_table = goldfish_battery_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_battery_acpi_match),
        }
 };
 module_platform_driver(goldfish_battery_device);
index ed2d7fd0c734dc47fc192fe7cccf825c63e1a35b..280018d59d5a60fab0b7d97145bbff25761bd8aa 100644 (file)
@@ -106,7 +106,10 @@ static ssize_t power_supply_show_property(struct device *dev,
        else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
                return sprintf(buf, "%s\n", value.strval);
 
-       return sprintf(buf, "%d\n", value.intval);
+       if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
+               return sprintf(buf, "%lld\n", value.int64val);
+       else
+               return sprintf(buf, "%d\n", value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -197,6 +200,12 @@ static struct device_attribute power_supply_attrs[] = {
        POWER_SUPPLY_ATTR(scope),
        POWER_SUPPLY_ATTR(charge_term_current),
        POWER_SUPPLY_ATTR(calibrate),
+       /* Local extensions */
+       POWER_SUPPLY_ATTR(usb_hc),
+       POWER_SUPPLY_ATTR(usb_otg),
+       POWER_SUPPLY_ATTR(charge_enabled),
+       /* Local extensions of type int64_t */
+       POWER_SUPPLY_ATTR(charge_counter_ext),
        /* Properties of type `const char *' */
        POWER_SUPPLY_ATTR(model_name),
        POWER_SUPPLY_ATTR(manufacturer),
index 7ea2c471feca3864a20612b9720e25314b5f5e77..3e9663d0752b4b7d06bd9a091e6caab7ad29eb83 100644 (file)
@@ -45,6 +45,42 @@ struct palmas_rtc {
 /* Total number of RTC registers needed to set time*/
 #define PALMAS_NUM_TIME_REGS   (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1)
 
+/*
+ * Special bin2bcd mapping to deal with bcd storage of year.
+ *
+ *   0-69                -> 0xD0
+ *  70-99  (1970 - 1999) -> 0xD0 - 0xF9 (correctly rolls to 0x00)
+ * 100-199 (2000 - 2099) -> 0x00 - 0x99 (does not roll to 0xA0 :-( )
+ * 200-229 (2100 - 2129) -> 0xA0 - 0xC9 (really for completeness)
+ * 230-                  -> 0xC9
+ *
+ * Confirmed: the only transition that does not work correctly for this rtc
+ * clock is the transition from 2099 to 2100; it proceeds to 2000. We will
+ * accept this issue since the clock retains and transitions the year correctly
+ * in all other conditions.
+ */
+static unsigned char year_bin2bcd(int val)
+{
+       if (val < 70)
+               return 0xD0;
+       if (val < 100)
+               return bin2bcd(val - 20) | 0x80; /* KISS leverage of bin2bcd */
+       if (val >= 230)
+               return 0xC9;
+       if (val >= 200)
+               return bin2bcd(val - 180) | 0x80;
+       return bin2bcd(val - 100);
+}
+
+static int year_bcd2bin(unsigned char val)
+{
+       if (val >= 0xD0)
+               return bcd2bin(val & 0x7F) + 20;
+       if (val >= 0xA0)
+               return bcd2bin(val & 0x7F) + 180;
+       return bcd2bin(val) + 100;
+}
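+
+/*
+ * For example, 2016 is stored as tm_year == 116: year_bin2bcd(116) returns
+ * bin2bcd(16) == 0x16 and year_bcd2bin(0x16) gives 116 back. For 1999
+ * (tm_year == 99), year_bin2bcd() returns bin2bcd(79) | 0x80 == 0xF9, which
+ * year_bcd2bin() converts back to 99.
+ */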
+
 static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
@@ -71,7 +107,7 @@ static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
        tm->tm_hour = bcd2bin(rtc_data[2]);
        tm->tm_mday = bcd2bin(rtc_data[3]);
        tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
-       tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+       tm->tm_year = year_bcd2bin(rtc_data[5]);
 
        return ret;
 }
@@ -87,7 +123,7 @@ static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm)
        rtc_data[2] = bin2bcd(tm->tm_hour);
        rtc_data[3] = bin2bcd(tm->tm_mday);
        rtc_data[4] = bin2bcd(tm->tm_mon + 1);
-       rtc_data[5] = bin2bcd(tm->tm_year - 100);
+       rtc_data[5] = year_bin2bcd(tm->tm_year);
 
        /* Stop RTC while updating the RTC time registers */
        ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
@@ -142,7 +178,7 @@ static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
        alm->time.tm_hour = bcd2bin(alarm_data[2]);
        alm->time.tm_mday = bcd2bin(alarm_data[3]);
        alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
-       alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+       alm->time.tm_year = year_bcd2bin(alarm_data[5]);
 
        ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG,
                        &int_val);
@@ -173,7 +209,7 @@ static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
        alarm_data[2] = bin2bcd(alm->time.tm_hour);
        alarm_data[3] = bin2bcd(alm->time.tm_mday);
        alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
-       alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+       alarm_data[5] = year_bin2bcd(alm->time.tm_year);
 
        ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE,
                PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS);
index 85cd2564c15773f728e8bd6ae6fcde28867ab2bb..4167bdbf0ecf29a4e275824bf3aa36378845586a 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <linux/async.h>
 #include <linux/devfreq.h>
+#include <linux/blkdev.h>
 
 #include "ufshcd.h"
 #include "unipro.h"
@@ -1332,6 +1333,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
+
+       /* IO svc time latency histogram */
+       if (hba != NULL && cmd->request != NULL) {
+               if (hba->latency_hist_enabled &&
+                   (cmd->request->cmd_type == REQ_TYPE_FS)) {
+                       cmd->request->lat_hist_io_start = ktime_get();
+                       cmd->request->lat_hist_enabled = 1;
+               } else {
+                       cmd->request->lat_hist_enabled = 0;
+               }
+       }
+
        WARN_ON(hba->clk_gating.state != CLKS_ON);
 
        lrbp = &hba->lrb[tag];
@@ -3160,6 +3172,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
        u32 tr_doorbell;
        int result;
        int index;
+       struct request *req;
 
        /* Resetting interrupt aggregation counters first and reading the
         * DOOR_BELL afterward allows us to handle all the completed requests.
@@ -3184,6 +3197,22 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
                        clear_bit_unlock(index, &hba->lrb_in_use);
+                       req = cmd->request;
+                       if (req) {
+                               /* Update IO svc time latency histogram */
+                               if (req->lat_hist_enabled) {
+                                       ktime_t completion;
+                                       u_int64_t delta_us;
+
+                                       completion = ktime_get();
+                                       delta_us = ktime_us_delta(completion,
+                                                 req->lat_hist_io_start);
+                                       /* rq_data_dir() => true if WRITE */
+                                       blk_update_latency_hist(&hba->io_lat_s,
+                                               (rq_data_dir(req) == READ),
+                                               delta_us);
+                               }
+                       }
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
                        __ufshcd_release(hba);
@@ -5327,6 +5356,54 @@ out:
 }
 EXPORT_SYMBOL(ufshcd_shutdown);
 
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
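+/*
+ * For example, the attribute is created under the host controller's device
+ * directory (the exact path depends on how that device is named):
+ *   echo 1 > /sys/devices/.../latency_hist    (enable collection)
+ *   cat /sys/devices/.../latency_hist         (dump the histograms)
+ *   echo 2 > /sys/devices/.../latency_hist    (zero them out)
+ */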
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       long value;
+
+       if (kstrtol(buf, 0, &value))
+               return -EINVAL;
+       if (value == BLK_IO_LAT_HIST_ZERO)
+               blk_zero_latency_hist(&hba->io_lat_s);
+       else if (value == BLK_IO_LAT_HIST_ENABLE ||
+                value == BLK_IO_LAT_HIST_DISABLE)
+               hba->latency_hist_enabled = value;
+       return count;
+}
+
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+                 char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return blk_latency_hist_show(&hba->io_lat_s, buf);
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+                  latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+       if (device_create_file(hba->dev, &dev_attr_latency_hist))
+               dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
+
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+       device_remove_file(hba->dev, &dev_attr_latency_hist);
+}
+
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *             data structure memory
@@ -5342,6 +5419,7 @@ void ufshcd_remove(struct ufs_hba *hba)
        scsi_host_put(hba->host);
 
        ufshcd_exit_clk_gating(hba);
+       ufshcd_exit_latency_hist(hba);
        if (ufshcd_is_clkscaling_enabled(hba))
                devfreq_remove_device(hba->devfreq);
        ufshcd_hba_exit(hba);
@@ -5639,6 +5717,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
 
+       ufshcd_init_latency_hist(hba);
+
        /*
         * The device-initialize-sequence hasn't been invoked yet.
         * Set the device to power-off state
@@ -5653,6 +5733,7 @@ out_remove_scsi_host:
        scsi_remove_host(hba->host);
 exit_gating:
        ufshcd_exit_clk_gating(hba);
+       ufshcd_exit_latency_hist(hba);
 out_disable:
        hba->is_irq_enabled = false;
        scsi_host_put(host);
index 2570d9477b3778c9505e616f72c22e0b30fad8be..f3780cf7d89520528efdfbab4561a27b1fde1a1d 100644 (file)
@@ -532,6 +532,9 @@ struct ufs_hba {
        struct devfreq *devfreq;
        struct ufs_clk_scaling clk_scaling;
        bool is_sys_suspended;
+
+       int                     latency_hist_enabled;
+       struct io_latency_state io_lat_s;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
index 42b15126aa06bc0828405b0b6fe65b733f7eb871..0c0c092ce354acaf6bb22a4cf9145c4b4e846eb8 100644 (file)
@@ -38,6 +38,15 @@ config ANDROID_LOW_MEMORY_KILLER
          scripts (/init.rc), and it defines priority values with minimum free memory size
          for each priority.
 
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+       bool "Android Low Memory Killer: detect oom_adj values"
+       depends on ANDROID_LOW_MEMORY_KILLER
+       default y
+       ---help---
+         Detect oom_adj values written to
+         /sys/module/lowmemorykiller/parameters/adj and convert them
+         to oom_score_adj values.
+
 config SYNC
        bool "Synchronization framework"
        default n
@@ -68,6 +77,8 @@ config SW_SYNC_USER
 
 source "drivers/staging/android/ion/Kconfig"
 
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
 endif # if ANDROID
 
 endmenu
index c7b6c99cc5ceca8ac31e9116209bcc049797f055..fcb7edca24c9db75731b60fea6f66f00dc5ef1ac 100644 (file)
@@ -1,6 +1,7 @@
 ccflags-y += -I$(src)                  # needed for trace events
 
 obj-y                                  += ion/
+obj-$(CONFIG_FIQ_DEBUGGER)             += fiq_debugger/
 
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)     += timed_output.o
index 3f2a3d611e4bea033b7943085f4fef747112be89..3f1133230a1afeb2f01e2fee15f524e4bd2f28b2 100644 (file)
@@ -396,22 +396,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
        }
        get_file(asma->file);
 
-       /*
-        * XXX - Reworked to use shmem_zero_setup() instead of
-        * shmem_set_file while we're in staging. -jstultz
-        */
-       if (vma->vm_flags & VM_SHARED) {
-               ret = shmem_zero_setup(vma);
-               if (ret) {
-                       fput(asma->file);
-                       goto out;
-               }
+       if (vma->vm_flags & VM_SHARED)
+               shmem_set_file(vma, asma->file);
+       else {
+               if (vma->vm_file)
+                       fput(vma->vm_file);
+               vma->vm_file = asma->file;
        }
 
-       if (vma->vm_file)
-               fput(vma->vm_file);
-       vma->vm_file = asma->file;
-
 out:
        mutex_unlock(&ashmem_mutex);
        return ret;
@@ -441,14 +433,16 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
 
-       mutex_lock(&ashmem_mutex);
+       if (!mutex_trylock(&ashmem_mutex))
+               return -1;
+
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-               vfs_fallocate(range->asma->file,
-                             FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-                             start, end - start);
+               range->asma->file->f_op->fallocate(range->asma->file,
+                               FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                               start, end - start);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);
 
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644 (file)
index 0000000..60fc224
--- /dev/null
@@ -0,0 +1,58 @@
+config FIQ_DEBUGGER
+       bool "FIQ Mode Serial Debugger"
+       default n
+       depends on ARM || ARM64
+       help
+         The FIQ serial debugger can accept commands even when the
+         kernel is unresponsive due to being stuck with interrupts
+         disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+       bool "Keep serial debugger active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables the serial debugger at boot. Passing
+         fiq_debugger.no_sleep on the kernel commandline will
+         override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+       bool "Don't disable wakeup IRQ when debugger is active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Don't disable the wakeup irq when enabling the uart clock.  This will
+         cause extra interrupts, but it makes the serial debugger usable
+         on some MSM radio builds that ignore the uart clock request in power
+         collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+       bool "Console on FIQ Serial Debugger port"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables a console so that printk messages are displayed on
+         the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+       bool "Put the FIQ debugger into console mode by default"
+       depends on FIQ_DEBUGGER_CONSOLE
+       default n
+       help
+         If enabled, this puts the fiq debugger into console mode by default.
+         Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_DEBUGGER_UART_OVERLAY
+       bool "Install uart DT overlay"
+       depends on FIQ_DEBUGGER
+       select OF_OVERLAY
+       default n
+       help
+         If enabled, the fiq debugger calls fiq_debugger_uart_overlay(),
+         which applies the uart_overlay@0 overlay to disable the proper uart.
+
+config FIQ_WATCHDOG
+       bool
+       select FIQ_DEBUGGER
+       select PSTORE_RAM
+       default n
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644 (file)
index 0000000..a7ca487
--- /dev/null
@@ -0,0 +1,4 @@
+obj-y                  += fiq_debugger.o
+obj-$(CONFIG_ARM)      += fiq_debugger_arm.o
+obj-$(CONFIG_ARM64)    += fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG)     += fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644 (file)
index 0000000..b132cff
--- /dev/null
@@ -0,0 +1,1248 @@
+/*
+ * drivers/staging/android/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#ifdef CONFIG_FIQ_GLUE
+#include <asm/fiq_glue.h>
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY
+#include <linux/of.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger.h"
+#include "fiq_debugger_priv.h"
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+struct fiq_debugger_state {
+#ifdef CONFIG_FIQ_GLUE
+       struct fiq_glue_handler handler;
+#endif
+       struct fiq_debugger_output output;
+
+       int fiq;
+       int uart_irq;
+       int signal_irq;
+       int wakeup_irq;
+       bool wakeup_irq_no_set_wake;
+       struct clk *clk;
+       struct fiq_debugger_pdata *pdata;
+       struct platform_device *pdev;
+
+       char debug_cmd[DEBUG_MAX];
+       int debug_busy;
+       int debug_abort;
+
+       char debug_buf[DEBUG_MAX];
+       int debug_count;
+
+       bool no_sleep;
+       bool debug_enable;
+       bool ignore_next_wakeup_irq;
+       struct timer_list sleep_timer;
+       spinlock_t sleep_timer_lock;
+       bool uart_enabled;
+       struct wake_lock debugger_wake_lock;
+       bool console_enable;
+       int current_cpu;
+       atomic_t unhandled_fiq_count;
+       bool in_fiq;
+
+       struct work_struct work;
+       spinlock_t work_lock;
+       char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+       spinlock_t console_lock;
+       struct console console;
+       struct tty_port tty_port;
+       struct fiq_debugger_ringbuf *tty_rbuf;
+       bool syslog_dumping;
+#endif
+
+       unsigned int last_irqs[NR_IRQS];
+       unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+static bool fiq_debugger_disable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+module_param_named(disable, fiq_debugger_disable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       enable_irq(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               enable_irq_wake(state->wakeup_irq);
+}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       disable_irq_nosync(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
+{
+       return (state->fiq >= 0);
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
+{
+       unsigned int irq = state->signal_irq;
+
+       if (WARN_ON(!fiq_debugger_have_fiq(state)))
+               return;
+       if (state->pdata->force_irq) {
+               state->pdata->force_irq(state->pdev, irq);
+       } else {
+               struct irq_chip *chip = irq_get_chip(irq);
+               if (chip && chip->irq_retrigger)
+                       chip->irq_retrigger(irq_get_irq_data(irq));
+       }
+}
+#endif
+
+static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
+{
+       if (state->clk)
+               clk_enable(state->clk);
+       if (state->pdata->uart_enable)
+               state->pdata->uart_enable(state->pdev);
+}
+
+static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_disable)
+               state->pdata->uart_disable(state->pdev);
+       if (state->clk)
+               clk_disable(state->clk);
+}
+
+static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_flush)
+               state->pdata->uart_flush(state->pdev);
+}
+
+static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
+{
+       state->pdata->uart_putc(state->pdev, c);
+}
+
+static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
+{
+       unsigned c;
+       while ((c = *s++)) {
+               if (c == '\n')
+                       fiq_debugger_putc(state, '\r');
+               fiq_debugger_putc(state, c);
+       }
+}
+
+static void fiq_debugger_prompt(struct fiq_debugger_state *state)
+{
+       fiq_debugger_puts(state, "debug> ");
+}
+
+static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
+{
+       char buf[512];
+       size_t len;
+       struct kmsg_dumper dumper = { .active = true };
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+                                        sizeof(buf) - 1, &len)) {
+               buf[len] = 0;
+               fiq_debugger_puts(state, buf);
+       }
+}
+
+static void fiq_debugger_printf(struct fiq_debugger_output *output,
+                              const char *fmt, ...)
+{
+       struct fiq_debugger_state *state;
+       char buf[256];
+       va_list ap;
+
+       state = container_of(output, struct fiq_debugger_state, output);
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       fiq_debugger_puts(state, buf);
+}
+
+/* Safe outside fiq context */
+static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+       unsigned long irq_flags;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       local_irq_save(irq_flags);
+       fiq_debugger_puts(state, buf);
+       fiq_debugger_uart_flush(state);
+       local_irq_restore(irq_flags);
+       return state->debug_abort;
+}
+
+static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
+{
+       int n;
+       struct irq_desc *desc;
+
+       fiq_debugger_printf(&state->output,
+                       "irqnr       total  since-last   status  name\n");
+       for_each_irq_desc(n, desc) {
+               struct irqaction *act = desc->action;
+               if (!act && !kstat_irqs(n))
+                       continue;
+               fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x  %s\n", n,
+                       kstat_irqs(n),
+                       kstat_irqs(n) - state->last_irqs[n],
+                       desc->status_use_accessors,
+                       (act && act->name) ? act->name : "???");
+               state->last_irqs[n] = kstat_irqs(n);
+       }
+}
+
+static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
+{
+       struct task_struct *g;
+       struct task_struct *p;
+       unsigned task_state;
+       static const char stat_nam[] = "RSDTtZX";
+
+       fiq_debugger_printf(&state->output, "pid   ppid  prio task            pc\n");
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               task_state = p->state ? __ffs(p->state) + 1 : 0;
+               fiq_debugger_printf(&state->output,
+                            "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+               fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+                            task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+               if (task_state == TASK_RUNNING)
+                       fiq_debugger_printf(&state->output, " running\n");
+               else
+                       fiq_debugger_printf(&state->output, " %08lx\n",
+                                       thread_saved_pc(p));
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *bug, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+       fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+       if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+               fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+               return;
+       }
+       fiq_debugger_begin_syslog_dump(state);
+       handle_sysrq(rq);
+       fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+       if (!fiq_kgdb_enable) {
+               fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
+               return;
+       }
+
+       fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
+       state->console_enable = true;
+       handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+               char *cmd)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->work_lock, flags);
+       if (state->work_cmd[0] != '\0') {
+               fiq_debugger_printf(&state->output, "work command processor busy\n");
+               spin_unlock_irqrestore(&state->work_lock, flags);
+               return;
+       }
+
+       strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       schedule_work(&state->work);
+}
+
+static void fiq_debugger_work(struct work_struct *work)
+{
+       struct fiq_debugger_state *state;
+       char work_cmd[DEBUG_MAX];
+       char *cmd;
+       unsigned long flags;
+
+       state = container_of(work, struct fiq_debugger_state, work);
+
+       spin_lock_irqsave(&state->work_lock, flags);
+
+       strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+       state->work_cmd[0] = '\0';
+
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       cmd = work_cmd;
+       if (!strncmp(cmd, "reboot", 6)) {
+               cmd += 6;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd != '\0')
+                       kernel_restart(cmd);
+               else
+                       kernel_restart(NULL);
+       } else {
+               fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+                               work_cmd);
+       }
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+       if (!strcmp(cmd, "ps"))
+               fiq_debugger_do_ps(state);
+       if (!strcmp(cmd, "sysrq"))
+               fiq_debugger_do_sysrq(state, 'h');
+       if (!strncmp(cmd, "sysrq ", 6))
+               fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+       if (!strcmp(cmd, "kgdb"))
+               fiq_debugger_do_kgdb(state);
+#endif
+       if (!strncmp(cmd, "reboot", 6))
+               fiq_debugger_schedule_work(state, cmd);
+}
+
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+       fiq_debugger_printf(&state->output,
+                               "FIQ Debugger commands:\n"
+                               " pc            PC status\n"
+                               " regs          Register dump\n"
+                               " allregs       Extended Register dump\n"
+                               " bt            Stack trace\n"
+                               " reboot [<c>]  Reboot with command <c>\n"
+                               " reset [<c>]   Hard reset with command <c>\n"
+                               " irqs          Interrupt status\n"
+                               " kmsg          Kernel log\n"
+                               " version       Kernel version\n");
+       fiq_debugger_printf(&state->output,
+                               " sleep         Allow sleep while in FIQ\n"
+                               " nosleep       Disable sleep while in FIQ\n"
+                               " console       Switch terminal to console\n"
+                               " cpu           Current CPU\n"
+                               " cpu <number>  Switch to CPU<number>\n");
+       fiq_debugger_printf(&state->output,
+                               " ps            Process list\n"
+                               " sysrq         sysrq options\n"
+                               " sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+       fiq_debugger_printf(&state->output,
+                               " kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+       struct fiq_debugger_state *state = info;
+       struct cpumask cpumask;
+
+       cpumask_clear(&cpumask);
+       cpumask_set_cpu(get_cpu(), &cpumask);
+
+       irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+       if (!fiq_debugger_have_fiq(state))
+               smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+                               false);
+       state->current_cpu = cpu;
+}
+
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+                       const char *cmd, const struct pt_regs *regs,
+                       void *svc_sp)
+{
+       bool signal_helper = false;
+
+       if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+               fiq_debugger_help(state);
+       } else if (!strcmp(cmd, "pc")) {
+               fiq_debugger_dump_pc(&state->output, regs);
+       } else if (!strcmp(cmd, "regs")) {
+               fiq_debugger_dump_regs(&state->output, regs);
+       } else if (!strcmp(cmd, "allregs")) {
+               fiq_debugger_dump_allregs(&state->output, regs);
+       } else if (!strcmp(cmd, "bt")) {
+               fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+       } else if (!strncmp(cmd, "reset", 5)) {
+               cmd += 5;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd) {
+                       char tmp_cmd[32];
+                       strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+                       machine_restart(tmp_cmd);
+               } else {
+                       machine_restart(NULL);
+               }
+       } else if (!strcmp(cmd, "irqs")) {
+               fiq_debugger_dump_irqs(state);
+       } else if (!strcmp(cmd, "kmsg")) {
+               fiq_debugger_dump_kernel_log(state);
+       } else if (!strcmp(cmd, "version")) {
+               fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+       } else if (!strcmp(cmd, "sleep")) {
+               state->no_sleep = false;
+               fiq_debugger_printf(&state->output, "enabling sleep\n");
+       } else if (!strcmp(cmd, "nosleep")) {
+               state->no_sleep = true;
+               fiq_debugger_printf(&state->output, "disabling sleep\n");
+       } else if (!strcmp(cmd, "console")) {
+               fiq_debugger_printf(&state->output, "console mode\n");
+               fiq_debugger_uart_flush(state);
+               state->console_enable = true;
+       } else if (!strcmp(cmd, "cpu")) {
+               fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+       } else if (!strncmp(cmd, "cpu ", 4)) {
+               unsigned long cpu = 0;
+               if (kstrtoul(cmd + 4, 10, &cpu) == 0)
+                       fiq_debugger_switch_cpu(state, cpu);
+               else
+                       fiq_debugger_printf(&state->output, "invalid cpu\n");
+               fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+       } else {
+               if (state->debug_busy) {
+                       fiq_debugger_printf(&state->output,
+                               "command processor busy. trying to abort.\n");
+                       state->debug_abort = -1;
+               } else {
+                       strcpy(state->debug_cmd, cmd);
+                       state->debug_busy = 1;
+               }
+
+               return true;
+       }
+       if (!state->console_enable)
+               fiq_debugger_prompt(state);
+
+       return signal_helper;
+}
+
+static void fiq_debugger_sleep_timer_expired(unsigned long data)
+{
+       struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->uart_enabled && !state->no_sleep) {
+               if (state->debug_enable && !state->console_enable) {
+                       state->debug_enable = false;
+                       fiq_debugger_printf_nfiq(state,
+                                       "suspending fiq debugger\n");
+               }
+               state->ignore_next_wakeup_irq = true;
+               fiq_debugger_uart_disable(state);
+               state->uart_enabled = false;
+               fiq_debugger_enable_wakeup_irq(state);
+       }
+       wake_unlock(&state->debugger_wake_lock);
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+               state->ignore_next_wakeup_irq = false;
+       } else if (!state->uart_enabled) {
+               wake_lock(&state->debugger_wake_lock);
+               fiq_debugger_uart_enable(state);
+               state->uart_enabled = true;
+               fiq_debugger_disable_wakeup_irq(state);
+               mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+       }
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (!state->no_sleep)
+               fiq_debugger_puts(state, "WAKEUP\n");
+       fiq_debugger_handle_wakeup(state);
+
+       return IRQ_HANDLED;
+}
+
+static
+void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       if (state->tty_port.ops) {
+               int i;
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               for (i = 0; i < count; i++) {
+                       int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
+                       if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+                               pr_warn("fiq tty failed to consume byte\n");
+               }
+               tty_flip_buffer_push(&state->tty_port);
+       }
+#endif
+}
+
+static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
+{
+       if (!state->no_sleep) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&state->sleep_timer_lock, flags);
+               wake_lock(&state->debugger_wake_lock);
+               mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+               spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+       }
+       fiq_debugger_handle_console_irq_context(state);
+       if (state->debug_busy) {
+               fiq_debugger_irq_exec(state, state->debug_cmd);
+               if (!state->console_enable)
+                       fiq_debugger_prompt(state);
+               state->debug_busy = 0;
+       }
+}
+
+static int fiq_debugger_getc(struct fiq_debugger_state *state)
+{
+       return state->pdata->uart_getc(state->pdev);
+}
+
+static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
+                       int this_cpu, const struct pt_regs *regs, void *svc_sp)
+{
+       int c;
+       static int last_c;
+       int count = 0;
+       bool signal_helper = false;
+
+       if (this_cpu != state->current_cpu) {
+               if (state->in_fiq)
+                       return false;
+
+               if (atomic_inc_return(&state->unhandled_fiq_count) !=
+                                       MAX_UNHANDLED_FIQ_COUNT)
+                       return false;
+
+               fiq_debugger_printf(&state->output,
+                       "fiq_debugger: cpu %d not responding, "
+                       "reverting to cpu %d\n", state->current_cpu,
+                       this_cpu);
+
+               atomic_set(&state->unhandled_fiq_count, 0);
+               fiq_debugger_switch_cpu(state, this_cpu);
+               return false;
+       }
+
+       state->in_fiq = true;
+
+       while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+               count++;
+               if (!state->debug_enable) {
+                       if ((c == 13) || (c == 10)) {
+                               state->debug_enable = true;
+                               state->debug_count = 0;
+                               fiq_debugger_prompt(state);
+                       }
+               } else if (c == FIQ_DEBUGGER_BREAK) {
+                       state->console_enable = false;
+                       fiq_debugger_puts(state, "fiq debugger mode\n");
+                       state->debug_count = 0;
+                       fiq_debugger_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+               } else if (state->console_enable && state->tty_rbuf) {
+                       fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+                       signal_helper = true;
+#endif
+               } else if ((c >= ' ') && (c < 127)) {
+                       if (state->debug_count < (DEBUG_MAX - 1)) {
+                               state->debug_buf[state->debug_count++] = c;
+                               fiq_debugger_putc(state, c);
+                       }
+               } else if ((c == 8) || (c == 127)) {
+                       if (state->debug_count > 0) {
+                               state->debug_count--;
+                               fiq_debugger_putc(state, 8);
+                               fiq_debugger_putc(state, ' ');
+                               fiq_debugger_putc(state, 8);
+                       }
+               } else if ((c == 13) || (c == 10)) {
+                       if (c == '\r' || (c == '\n' && last_c != '\r')) {
+                               fiq_debugger_putc(state, '\r');
+                               fiq_debugger_putc(state, '\n');
+                       }
+                       if (state->debug_count) {
+                               state->debug_buf[state->debug_count] = 0;
+                               state->debug_count = 0;
+                               signal_helper |=
+                                       fiq_debugger_fiq_exec(state,
+                                                       state->debug_buf,
+                                                       regs, svc_sp);
+                       } else {
+                               fiq_debugger_prompt(state);
+                       }
+               }
+               last_c = c;
+       }
+       if (!state->console_enable)
+               fiq_debugger_uart_flush(state);
+       if (state->pdata->fiq_ack)
+               state->pdata->fiq_ack(state->pdev, state->fiq);
+
+       /* poke sleep timer if necessary */
+       if (state->debug_enable && !state->no_sleep)
+               signal_helper = true;
+
+       atomic_set(&state->unhandled_fiq_count, 0);
+       state->in_fiq = false;
+
+       return signal_helper;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_fiq(struct fiq_glue_handler *h,
+               const struct pt_regs *regs, void *svc_sp)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+       bool need_irq;
+
+       need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
+                       svc_sp);
+       if (need_irq)
+               fiq_debugger_force_irq(state);
+}
+#endif
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+       bool not_done;
+
+       fiq_debugger_handle_wakeup(state);
+
+       /* handle the debugger irq in regular context */
+       not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
+                                             get_irq_regs(),
+                                             current_thread_info());
+       if (not_done)
+               fiq_debugger_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (state->pdata->force_irq_ack)
+               state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+       fiq_debugger_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_resume(struct fiq_glue_handler *h)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       if (state->pdata->uart_resume)
+               state->pdata->uart_resume(state->pdev);
+}
+#endif
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *fiq_debugger_console_device(struct console *co, int *index)
+{
+       *index = co->index;
+       return fiq_tty_driver;
+}
+
+static void fiq_debugger_console_write(struct console *co,
+                               const char *s, unsigned int count)
+{
+       struct fiq_debugger_state *state;
+       unsigned long flags;
+
+       state = container_of(co, struct fiq_debugger_state, console);
+
+       if (!state->console_enable && !state->syslog_dumping)
+               return;
+
+       fiq_debugger_uart_enable(state);
+       spin_lock_irqsave(&state->console_lock, flags);
+       while (count--) {
+               if (*s == '\n')
+                       fiq_debugger_putc(state, '\r');
+               fiq_debugger_putc(state, *s++);
+       }
+       fiq_debugger_uart_flush(state);
+       spin_unlock_irqrestore(&state->console_lock, flags);
+       fiq_debugger_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+       .name = "ttyFIQ",
+       .device = fiq_debugger_console_device,
+       .write = fiq_debugger_console_write,
+       .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       return tty_port_open(&state->tty_port, tty, filp);
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+       tty_port_close(tty->port, tty, filp);
+}
+
+int  fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+       int i;
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       if (!state->console_enable)
+               return count;
+
+       fiq_debugger_uart_enable(state);
+       spin_lock_irq(&state->console_lock);
+       for (i = 0; i < count; i++)
+               fiq_debugger_putc(state, *buf++);
+       spin_unlock_irq(&state->console_lock);
+       fiq_debugger_uart_disable(state);
+
+       return count;
+}
+
+int  fiq_tty_write_room(struct tty_struct *tty)
+{
+       return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+       return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       int c = NO_POLL_CHAR;
+
+       fiq_debugger_uart_enable(state);
+       if (fiq_debugger_have_fiq(state)) {
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               if (count > 0) {
+                       c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+               }
+       } else {
+               c = fiq_debugger_getc(state);
+               if (c == FIQ_DEBUGGER_NO_CHAR)
+                       c = NO_POLL_CHAR;
+       }
+       fiq_debugger_uart_disable(state);
+
+       return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       fiq_debugger_uart_enable(state);
+       fiq_debugger_putc(state, ch);
+       fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+       .write = fiq_tty_write,
+       .write_room = fiq_tty_write_room,
+       .open = fiq_tty_open,
+       .close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_init = fiq_tty_poll_init,
+       .poll_get_char = fiq_tty_poll_get_char,
+       .poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+       int ret;
+       struct fiq_debugger_state **states = NULL;
+
+       states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+       if (!states) {
+               pr_err("Failed to allocate fiq debugger state structures\n");
+               return -ENOMEM;
+       }
+
+       fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+       if (!fiq_tty_driver) {
+               pr_err("Failed to allocate fiq debugger tty\n");
+               ret = -ENOMEM;
+               goto err_free_state;
+       }
+
+       fiq_tty_driver->owner           = THIS_MODULE;
+       fiq_tty_driver->driver_name     = "fiq-debugger";
+       fiq_tty_driver->name            = "ttyFIQ";
+       fiq_tty_driver->type            = TTY_DRIVER_TYPE_SERIAL;
+       fiq_tty_driver->subtype         = SERIAL_TYPE_NORMAL;
+       fiq_tty_driver->init_termios    = tty_std_termios;
+       fiq_tty_driver->flags           = TTY_DRIVER_REAL_RAW |
+                                         TTY_DRIVER_DYNAMIC_DEV;
+       fiq_tty_driver->driver_state    = states;
+
+       fiq_tty_driver->init_termios.c_cflag =
+                                       B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+       fiq_tty_driver->init_termios.c_ispeed = 115200;
+       fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+       tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+       ret = tty_register_driver(fiq_tty_driver);
+       if (ret) {
+               pr_err("Failed to register fiq tty: %d\n", ret);
+               goto err_free_tty;
+       }
+
+       pr_info("Registered FIQ tty driver\n");
+       return 0;
+
+err_free_tty:
+       put_tty_driver(fiq_tty_driver);
+       fiq_tty_driver = NULL;
+err_free_state:
+       kfree(states);
+       return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+       int ret;
+       struct device *tty_dev;
+       struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+       states[state->pdev->id] = state;
+
+       state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+       if (!state->tty_rbuf) {
+               pr_err("Failed to allocate fiq debugger ringbuf\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       tty_port_init(&state->tty_port);
+       state->tty_port.ops = &fiq_tty_port_ops;
+
+       tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+                                          state->pdev->id, &state->pdev->dev);
+       if (IS_ERR(tty_dev)) {
+               pr_err("Failed to register fiq debugger tty device\n");
+               ret = PTR_ERR(tty_dev);
+               goto err;
+       }
+
+       device_set_wakeup_capable(tty_dev, 1);
+
+       pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+       return 0;
+
+err:
+       fiq_debugger_ringbuf_free(state->tty_rbuf);
+       state->tty_rbuf = NULL;
+       return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_suspend)
+               return state->pdata->uart_dev_suspend(pdev);
+       return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_resume)
+               return state->pdata->uart_dev_resume(pdev);
+       return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+       struct fiq_debugger_state *state;
+       int fiq;
+       int uart_irq;
+
+       if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+               return -EINVAL;
+
+       if (!pdata->uart_getc || !pdata->uart_putc)
+               return -EINVAL;
+       if ((pdata->uart_enable && !pdata->uart_disable) ||
+           (!pdata->uart_enable && pdata->uart_disable))
+               return -EINVAL;
+
+       fiq = platform_get_irq_byname(pdev, "fiq");
+       uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+       /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+        * is required */
+       if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+               return -EINVAL;
+       if (fiq >= 0 && !pdata->fiq_enable)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+       state->output.printf = fiq_debugger_printf;
+       setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+                   (unsigned long)state);
+       state->pdata = pdata;
+       state->pdev = pdev;
+       state->no_sleep = initial_no_sleep;
+       state->debug_enable = initial_debug_enable;
+       state->console_enable = initial_console_enable;
+
+       state->fiq = fiq;
+       state->uart_irq = uart_irq;
+       state->signal_irq = platform_get_irq_byname(pdev, "signal");
+       state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+       INIT_WORK(&state->work, fiq_debugger_work);
+       spin_lock_init(&state->work_lock);
+
+       platform_set_drvdata(pdev, state);
+
+       spin_lock_init(&state->sleep_timer_lock);
+
+       if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+               state->no_sleep = true;
+       state->ignore_next_wakeup_irq = !state->no_sleep;
+
+       wake_lock_init(&state->debugger_wake_lock,
+                       WAKE_LOCK_SUSPEND, "serial-debug");
+
+       state->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(state->clk))
+               state->clk = NULL;
+
+       /* do not call pdata->uart_enable here since uart_init may still
+        * need to do some initialization before uart_enable can work.
+        * So, only try to manage the clock during init.
+        */
+       if (state->clk)
+               clk_enable(state->clk);
+
+       if (pdata->uart_init) {
+               ret = pdata->uart_init(pdev);
+               if (ret)
+                       goto err_uart_init;
+       }
+
+       fiq_debugger_printf_nfiq(state,
+                               "<hit enter %sto activate fiq debugger>\n",
+                               state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+       if (fiq_debugger_have_fiq(state)) {
+               state->handler.fiq = fiq_debugger_fiq;
+               state->handler.resume = fiq_debugger_resume;
+               ret = fiq_glue_register_handler(&state->handler);
+               if (ret) {
+                       pr_err("%s: could not install fiq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               pdata->fiq_enable(pdev, state->fiq, 1);
+       } else
+#endif
+       {
+               ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+                                 IRQF_NO_SUSPEND, "debug", state);
+               if (ret) {
+                       pr_err("%s: could not install irq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               /* for irq-only mode, we want this irq to wake us up, if it
+                * can.
+                */
+               enable_irq_wake(state->uart_irq);
+       }
+
+       if (state->clk)
+               clk_disable(state->clk);
+
+       if (state->signal_irq >= 0) {
+               ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
+                         IRQF_TRIGGER_RISING, "debug-signal", state);
+               if (ret)
+                       pr_err("serial_debugger: could not install signal_irq\n");
+       }
+
+       if (state->wakeup_irq >= 0) {
+               ret = request_irq(state->wakeup_irq,
+                                 fiq_debugger_wakeup_irq_handler,
+                                 IRQF_TRIGGER_FALLING,
+                                 "debug-wakeup", state);
+               if (ret) {
+                       pr_err("serial_debugger: "
+                               "could not install wakeup irq\n");
+                       state->wakeup_irq = -1;
+               } else {
+                       ret = enable_irq_wake(state->wakeup_irq);
+                       if (ret) {
+                               pr_err("serial_debugger: "
+                                       "could not enable wakeup\n");
+                               state->wakeup_irq_no_set_wake = true;
+                       }
+               }
+       }
+       if (state->no_sleep)
+               fiq_debugger_handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       spin_lock_init(&state->console_lock);
+       state->console = fiq_debugger_console;
+       state->console.index = pdev->id;
+       if (!console_set_on_cmdline)
+               add_preferred_console(state->console.name,
+                       state->console.index, NULL);
+       register_console(&state->console);
+       fiq_debugger_tty_init_one(state);
+#endif
+       return 0;
+
+err_register_irq:
+       if (pdata->uart_free)
+               pdata->uart_free(pdev);
+err_uart_init:
+       if (state->clk)
+               clk_disable(state->clk);
+       if (state->clk)
+               clk_put(state->clk);
+       wake_lock_destroy(&state->debugger_wake_lock);
+       platform_set_drvdata(pdev, NULL);
+       kfree(state);
+       return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+       .suspend        = fiq_debugger_dev_suspend,
+       .resume         = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+       .probe  = fiq_debugger_probe,
+       .driver = {
+               .name   = "fiq_debugger",
+               .pm     = &fiq_debugger_dev_pm_ops,
+       },
+};
+
+#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
+int fiq_debugger_uart_overlay(void)
+{
+       struct device_node *onp = of_find_node_by_path("/uart_overlay@0");
+       int ret;
+
+       if (!onp) {
+               pr_err("serial_debugger: uart overlay not found\n");
+               return -ENODEV;
+       }
+
+       ret = of_overlay_create(onp);
+       if (ret < 0) {
+               pr_err("serial_debugger: failed to create overlay: %d\n", ret);
+               of_node_put(onp);
+               return ret;
+       }
+
+       pr_info("serial_debugger: uart overlay applied\n");
+       return 0;
+}
+#endif
+
+static int __init fiq_debugger_init(void)
+{
+       if (fiq_debugger_disable) {
+               pr_err("serial_debugger: disabled\n");
+               return -ENODEV;
+       }
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       fiq_debugger_tty_init();
+#endif
+#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
+       fiq_debugger_uart_overlay();
+#endif
+       return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
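
The module parameters above are registered with module_param_named(), so on a
built-in kernel they are set from the command line with the fiq_debugger.
prefix. A hypothetical boot argument string that keeps the debugger awake and
starts it directly in console mode on the first port could look like:

    fiq_debugger.no_sleep=1 fiq_debugger.console_enable=1 console=ttyFIQ0

(console=ttyFIQ0 is only needed when the console was not already selected via
add_preferred_console() in the probe path.)
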
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
new file mode 100644 (file)
index 0000000..c9ec4f8
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME      "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME   "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME   "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume:       used to restore uart state right before enabling
+ *                     the fiq.
+ * @uart_enable:       Do the work necessary to communicate with the uart
+ *                     hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:      Do the work necessary to disable the uart hw
+ *                     (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:  called during PM suspend, generally not needed
+ *                     for real fiq mode debugger.
+ * @uart_dev_resume:   called during PM resume, generally not needed
+ *                     for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+       int (*uart_init)(struct platform_device *pdev);
+       void (*uart_free)(struct platform_device *pdev);
+       int (*uart_resume)(struct platform_device *pdev);
+       int (*uart_getc)(struct platform_device *pdev);
+       void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+       void (*uart_flush)(struct platform_device *pdev);
+       void (*uart_enable)(struct platform_device *pdev);
+       void (*uart_disable)(struct platform_device *pdev);
+
+       int (*uart_dev_suspend)(struct platform_device *pdev);
+       int (*uart_dev_resume)(struct platform_device *pdev);
+
+       void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+                                                               bool enable);
+       void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+       void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+       void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
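
As a rough sketch of how this platform data is consumed (not part of this
patch), a board file could register an irq-mode instance by supplying the two
mandatory callbacks plus named IRQ resources matching what the probe function
looks up. All names and IRQ numbers below (my_uart_getc, my_uart_putc, 55, 56)
are placeholders:

/* Hypothetical board-file sketch; IRQ numbers and helpers are placeholders. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "fiq_debugger.h"

static int my_uart_getc(struct platform_device *pdev)
{
	/* poll the UART RX FIFO here */
	return FIQ_DEBUGGER_NO_CHAR;
}

static void my_uart_putc(struct platform_device *pdev, unsigned int c)
{
	/* write c to the UART TX FIFO here */
}

static struct fiq_debugger_pdata my_fiq_pdata = {
	.uart_getc = my_uart_getc,	/* mandatory */
	.uart_putc = my_uart_putc,	/* mandatory */
};

/* "uart_irq" and "fiq" are mutually exclusive; this sketch uses irq mode. */
static struct resource my_fiq_resources[] = {
	DEFINE_RES_IRQ_NAMED(55, "uart_irq"),
	DEFINE_RES_IRQ_NAMED(56, FIQ_DEBUGGER_WAKEUP_IRQ_NAME),
};

static int __init my_board_fiq_debugger_init(void)
{
	struct platform_device *pdev;

	/* "fiq_debugger" matches the platform_driver name; id selects ttyFIQ<id> */
	pdev = platform_device_register_resndata(NULL, "fiq_debugger", 0,
			my_fiq_resources, ARRAY_SIZE(my_fiq_resources),
			&my_fiq_pdata, sizeof(my_fiq_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(my_board_fiq_debugger_init);
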
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
new file mode 100644 (file)
index 0000000..8b3e013
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(unsigned cpsr)
+{
+       switch (cpsr & MODE_MASK) {
+       case USR_MODE: return "USR";
+       case FIQ_MODE: return "FIQ";
+       case IRQ_MODE: return "IRQ";
+       case SVC_MODE: return "SVC";
+       case ABT_MODE: return "ABT";
+       case UND_MODE: return "UND";
+       case SYSTEM_MODE: return "SYS";
+       default: return "???";
+       }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " pc %08x cpsr %08x mode %s\n",
+               regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output,
+                       " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+       output->printf(output,
+                       " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+       output->printf(output,
+                       " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+                       regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
+                       mode_name(regs->ARM_cpsr));
+       output->printf(output,
+                       " ip %08x  sp %08x  lr %08x  pc %08x cpsr %08x\n",
+                       regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
+                       regs->ARM_cpsr);
+}
+
+struct mode_regs {
+       unsigned long sp_svc;
+       unsigned long lr_svc;
+       unsigned long spsr_svc;
+
+       unsigned long sp_abt;
+       unsigned long lr_abt;
+       unsigned long spsr_abt;
+
+       unsigned long sp_und;
+       unsigned long lr_und;
+       unsigned long spsr_und;
+
+       unsigned long sp_irq;
+       unsigned long lr_irq;
+       unsigned long spsr_irq;
+
+       unsigned long r8_fiq;
+       unsigned long r9_fiq;
+       unsigned long r10_fiq;
+       unsigned long r11_fiq;
+       unsigned long r12_fiq;
+       unsigned long sp_fiq;
+       unsigned long lr_fiq;
+       unsigned long spsr_fiq;
+};
+
+static void __naked get_mode_regs(struct mode_regs *regs)
+{
+       asm volatile (
+       "mrs    r1, cpsr\n"
+       "msr    cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r8 - r14}\n"
+       "mrs    r2, spsr\n"
+       "stmia  r0!, {r2}\n"
+       "msr    cpsr_c, r1\n"
+       "bx     lr\n");
+}
+
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       struct mode_regs mode_regs;
+       unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+       fiq_debugger_dump_regs(output, regs);
+       get_mode_regs(&mode_regs);
+
+       output->printf(output,
+                       "%csvc: sp %08x  lr %08x  spsr %08x\n",
+                       mode == SVC_MODE ? '*' : ' ',
+                       mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+       output->printf(output,
+                       "%cabt: sp %08x  lr %08x  spsr %08x\n",
+                       mode == ABT_MODE ? '*' : ' ',
+                       mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+       output->printf(output,
+                       "%cund: sp %08x  lr %08x  spsr %08x\n",
+                       mode == UND_MODE ? '*' : ' ',
+                       mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+       output->printf(output,
+                       "%cirq: sp %08x  lr %08x  spsr %08x\n",
+                       mode == IRQ_MODE ? '*' : ' ',
+                       mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+       output->printf(output,
+                       "%cfiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  r12 %08x\n",
+                       mode == FIQ_MODE ? '*' : ' ',
+                       mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+                       mode_regs.r11_fiq, mode_regs.r12_fiq);
+       output->printf(output,
+                       " fiq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_output *output;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               sts->output->printf(sts->output,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       frame->pc, frame->pc, frame->lr, frame->lr,
+                       frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       sts->output->printf(sts->output, "  ...\n");
+
+       return sts->depth == 0;
+}
+
+struct frame_tail {
+       struct frame_tail *fp;
+       unsigned long sp;
+       unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
+                                       struct frame_tail *tail)
+{
+       struct frame_tail buftail[2];
+
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+               output->printf(output, "  invalid frame pointer %p\n",
+                               tail);
+               return NULL;
+       }
+       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+               output->printf(output,
+                       "  failed to copy frame pointer %p\n", tail);
+               return NULL;
+       }
+
+       output->printf(output, "  %p\n", buftail[0].lr);
+
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (tail >= buftail[0].fp)
+               return NULL;
+
+       return buftail[0].fp-1;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+       struct frame_tail *tail;
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.output = output;
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               output->printf(output, "current NULL\n");
+       else
+               output->printf(output, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       fiq_debugger_dump_regs(output, regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->ARM_fp;
+               frame.sp = regs->ARM_sp;
+               frame.lr = regs->ARM_lr;
+               frame.pc = regs->ARM_pc;
+               output->printf(output,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+                       regs->ARM_sp, regs->ARM_fp);
+               walk_stackframe(&frame, report_trace, &sts);
+               return;
+       }
+
+       tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+       while (depth-- && tail && !((unsigned long) tail & 3))
+               tail = user_backtrace(output, tail);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
new file mode 100644 (file)
index 0000000..97246bc
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(const struct pt_regs *regs)
+{
+       if (compat_user_mode(regs)) {
+               return "USR";
+       } else {
+               switch (processor_mode(regs)) {
+               case PSR_MODE_EL0t: return "EL0t";
+               case PSR_MODE_EL1t: return "EL1t";
+               case PSR_MODE_EL1h: return "EL1h";
+               case PSR_MODE_EL2t: return "EL2t";
+               case PSR_MODE_EL2h: return "EL2h";
+               default: return "???";
+               }
+       }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
+               regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs->compat_usr(0), regs->compat_usr(1),
+                       regs->compat_usr(2), regs->compat_usr(3));
+       output->printf(output, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs->compat_usr(4), regs->compat_usr(5),
+                       regs->compat_usr(6), regs->compat_usr(7));
+       output->printf(output, " r8 %08x  r9 %08x r10 %08x r11 %08x\n",
+                       regs->compat_usr(8), regs->compat_usr(9),
+                       regs->compat_usr(10), regs->compat_usr(11));
+       output->printf(output, " ip %08x  sp %08x  lr %08x  pc %08x\n",
+                       regs->compat_usr(12), regs->compat_sp,
+                       regs->compat_lr, regs->pc);
+       output->printf(output, " cpsr %08x (%s)\n",
+                       regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+
+       output->printf(output, "  x0 %016lx   x1 %016lx\n",
+                       regs->regs[0], regs->regs[1]);
+       output->printf(output, "  x2 %016lx   x3 %016lx\n",
+                       regs->regs[2], regs->regs[3]);
+       output->printf(output, "  x4 %016lx   x5 %016lx\n",
+                       regs->regs[4], regs->regs[5]);
+       output->printf(output, "  x6 %016lx   x7 %016lx\n",
+                       regs->regs[6], regs->regs[7]);
+       output->printf(output, "  x8 %016lx   x9 %016lx\n",
+                       regs->regs[8], regs->regs[9]);
+       output->printf(output, " x10 %016lx  x11 %016lx\n",
+                       regs->regs[10], regs->regs[11]);
+       output->printf(output, " x12 %016lx  x13 %016lx\n",
+                       regs->regs[12], regs->regs[13]);
+       output->printf(output, " x14 %016lx  x15 %016lx\n",
+                       regs->regs[14], regs->regs[15]);
+       output->printf(output, " x16 %016lx  x17 %016lx\n",
+                       regs->regs[16], regs->regs[17]);
+       output->printf(output, " x18 %016lx  x19 %016lx\n",
+                       regs->regs[18], regs->regs[19]);
+       output->printf(output, " x20 %016lx  x21 %016lx\n",
+                       regs->regs[20], regs->regs[21]);
+       output->printf(output, " x22 %016lx  x23 %016lx\n",
+                       regs->regs[22], regs->regs[23]);
+       output->printf(output, " x24 %016lx  x25 %016lx\n",
+                       regs->regs[24], regs->regs[25]);
+       output->printf(output, " x26 %016lx  x27 %016lx\n",
+                       regs->regs[26], regs->regs[27]);
+       output->printf(output, " x28 %016lx  x29 %016lx\n",
+                       regs->regs[28], regs->regs[29]);
+       output->printf(output, " x30 %016lx   sp %016lx\n",
+                       regs->regs[30], regs->sp);
+       output->printf(output, "  pc %016lx cpsr %08x (%s)\n",
+                       regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       if (compat_user_mode(regs))
+               fiq_debugger_dump_regs_aarch32(output, regs);
+       else
+               fiq_debugger_dump_regs_aarch64(output, regs);
+}
+
+#define READ_SPECIAL_REG(x) ({ \
+       u64 val; \
+       asm volatile ("mrs %0, " # x : "=r"(val)); \
+       val; \
+})
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       u32 pstate = READ_SPECIAL_REG(CurrentEl);
+       bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t;
+
+       fiq_debugger_dump_regs(output, regs);
+
+       output->printf(output, " sp_el0   %016lx\n",
+                       READ_SPECIAL_REG(sp_el0));
+
+       if (in_el2)
+               output->printf(output, " sp_el1   %016lx\n",
+                               READ_SPECIAL_REG(sp_el1));
+
+       output->printf(output, " elr_el1  %016lx\n",
+                       READ_SPECIAL_REG(elr_el1));
+
+       output->printf(output, " spsr_el1 %08lx\n",
+                       READ_SPECIAL_REG(spsr_el1));
+
+       if (in_el2) {
+               output->printf(output, " spsr_irq %08lx\n",
+                               READ_SPECIAL_REG(spsr_irq));
+               output->printf(output, " spsr_abt %08lx\n",
+                               READ_SPECIAL_REG(spsr_abt));
+               output->printf(output, " spsr_und %08lx\n",
+                               READ_SPECIAL_REG(spsr_und));
+               output->printf(output, " spsr_fiq %08lx\n",
+                               READ_SPECIAL_REG(spsr_fiq));
+               output->printf(output, " spsr_el2 %08lx\n",
+                               READ_SPECIAL_REG(elr_el2));
+               output->printf(output, " spsr_el2 %08lx\n",
+                               READ_SPECIAL_REG(spsr_el2));
+       }
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_output *output;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               sts->output->printf(sts->output, "%pF:\n", frame->pc);
+               sts->output->printf(sts->output,
+                               "  pc %016lx   sp %016lx   fp %016lx\n",
+                               frame->pc, frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       sts->output->printf(sts->output, "  ...\n");
+
+       return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.output = output;
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               output->printf(output, "current NULL\n");
+       else
+               output->printf(output, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       fiq_debugger_dump_regs(output, regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->regs[29];
+               frame.sp = regs->sp;
+               frame.pc = regs->pc;
+               output->printf(output, "\n");
+               walk_stackframe(current, &frame, report_trace, &sts);
+       }
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644 (file)
index 0000000..d5d051f
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_PRIV_H_
+#define _FIQ_DEBUGGER_PRIV_H_
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+               ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_output {
+       void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
+};
+
+struct pt_regs;
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp);
+
+#endif
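
The header above is the whole contract between the architecture-specific dump helpers and their users: callers supply a single printf-style callback in struct fiq_debugger_output. A minimal sketch of such a callback, assuming a hypothetical sink function example_emit_line() (fiq_watchdog.c further below does the same thing with ramoops_console_write_buf()):

#include <linux/kernel.h>

#include "fiq_debugger_priv.h"

/* example_emit_line() is a hypothetical sink; any function that can accept a
 * formatted string from atomic context would do. */
extern void example_emit_line(const char *s);

static void example_printf(struct fiq_debugger_output *output,
                           const char *fmt, ...)
{
        char buf[256];
        va_list ap;

        va_start(ap, fmt);
        vscnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);

        example_emit_line(buf);
}

static struct fiq_debugger_output example_output = {
        .printf = example_printf,
};

/* &example_output can then be handed to fiq_debugger_dump_regs() or
 * fiq_debugger_dump_stacktrace() declared above. */
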
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
new file mode 100644 (file)
index 0000000..10c3c5d
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+       int len;
+       int head;
+       int tail;
+       u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+       struct fiq_debugger_ringbuf *rbuf;
+
+       rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+       if (rbuf == NULL)
+               return NULL;
+
+       rbuf->len = len;
+       rbuf->head = 0;
+       rbuf->tail = 0;
+       smp_mb();
+
+       return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+       kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+       int level = rbuf->head - rbuf->tail;
+
+       if (level < 0)
+               level = rbuf->len + level;
+
+       return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+       return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+       return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+       count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+       rbuf->tail = (rbuf->tail + count) % rbuf->len;
+       smp_mb();
+
+       return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+       if (fiq_debugger_ringbuf_room(rbuf) == 0)
+               return 0;
+
+       rbuf->buf[rbuf->head] = datum;
+       smp_mb();
+       rbuf->head = (rbuf->head + 1) % rbuf->len;
+       smp_mb();
+
+       return 1;
+}
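
The ring buffer above is safe only for a single producer and a single consumer: the writer is the only side that advances head, the reader the only side that advances tail, and the smp_mb() calls order the data accesses against the index updates. A short usage sketch under that assumption (the names and the 128-byte size are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>

#include "fiq_debugger_ringbuf.h"

static struct fiq_debugger_ringbuf *example_rx_buf;

/* Producer side, e.g. a UART IRQ handler: drop the byte if the buffer is full. */
static void example_rx_byte(u8 c)
{
        if (!fiq_debugger_ringbuf_push(example_rx_buf, c))
                pr_warn_ratelimited("example: rx ring full, byte dropped\n");
}

/* Consumer side: look at everything currently buffered, then retire it. */
static void example_drain(void)
{
        int i, level = fiq_debugger_ringbuf_level(example_rx_buf);

        for (i = 0; i < level; i++)
                pr_info("rx byte: %02x\n",
                        fiq_debugger_ringbuf_peek(example_rx_buf, i));

        fiq_debugger_ringbuf_consume(example_rx_buf, level);
}

static int __init example_init(void)
{
        example_rx_buf = fiq_debugger_ringbuf_alloc(128);
        return example_rx_buf ? 0 : -ENOMEM;
}

Note that fiq_debugger_ringbuf_room() deliberately keeps one slot unused to distinguish full from empty, so a 128-byte allocation buffers at most 127 bytes.
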
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
new file mode 100644 (file)
index 0000000..194b541
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/pstore_ram.h>
+
+#include "fiq_watchdog.h"
+#include "fiq_debugger_priv.h"
+
+static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
+
+static void fiq_watchdog_printf(struct fiq_debugger_output *output,
+                               const char *fmt, ...)
+{
+       char buf[256];
+       va_list ap;
+       int len;
+
+       va_start(ap, fmt);
+       len = vscnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       ramoops_console_write_buf(buf, len);
+}
+
+struct fiq_debugger_output fiq_watchdog_output = {
+       .printf = fiq_watchdog_printf,
+};
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
+{
+       char msg[24];
+       int len;
+
+       raw_spin_lock(&fiq_watchdog_lock);
+
+       len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
+                       THREAD_INFO(svc_sp)->cpu);
+       ramoops_console_write_buf(msg, len);
+
+       fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
+
+       raw_spin_unlock(&fiq_watchdog_lock);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
new file mode 100644 (file)
index 0000000..c6b507f
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_WATCHDOG_H_
+#define _FIQ_WATCHDOG_H_
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
+
+#endif
index 3452346244922d1211b674087641c1009b2c3e0b..356e109692728dcc269173e10290b90721f41ef9 100644 (file)
@@ -33,3 +33,10 @@ config ION_TEGRA
        help
          Choose this option if you wish to use ion on an nVidia Tegra.
 
+config ION_POOL_CACHE_POLICY
+       bool "Ion set page pool cache policy"
+       depends on ION && X86
+       default y if X86
+       help
+         Choose this option if you need to explicitly set the cache policy
+         of the pages in the page pool.
index df560216d7026c87de39c8b86f885ea2443a3d17..374f840f31a486b6d187582f2bc971d0b2435362 100644 (file)
@@ -387,13 +387,22 @@ static void ion_handle_get(struct ion_handle *handle)
        kref_get(&handle->ref);
 }
 
-static int ion_handle_put(struct ion_handle *handle)
+static int ion_handle_put_nolock(struct ion_handle *handle)
+{
+       int ret;
+
+       ret = kref_put(&handle->ref, ion_handle_destroy);
+
+       return ret;
+}
+
+int ion_handle_put(struct ion_handle *handle)
 {
        struct ion_client *client = handle->client;
        int ret;
 
        mutex_lock(&client->lock);
-       ret = kref_put(&handle->ref, ion_handle_destroy);
+       ret = ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);
 
        return ret;
@@ -417,20 +426,30 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
        return ERR_PTR(-EINVAL);
 }
 
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
                                                int id)
 {
        struct ion_handle *handle;
 
-       mutex_lock(&client->lock);
        handle = idr_find(&client->idr, id);
        if (handle)
                ion_handle_get(handle);
-       mutex_unlock(&client->lock);
 
        return handle ? handle : ERR_PTR(-EINVAL);
 }
 
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+                                               int id)
+{
+       struct ion_handle *handle;
+
+       mutex_lock(&client->lock);
+       handle = ion_handle_get_by_id_nolock(client, id);
+       mutex_unlock(&client->lock);
+
+       return handle;
+}
+
 static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
 {
@@ -532,22 +551,28 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 }
 EXPORT_SYMBOL(ion_alloc);
 
-void ion_free(struct ion_client *client, struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
 {
        bool valid_handle;
 
        BUG_ON(client != handle->client);
 
-       mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
 
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
-               mutex_unlock(&client->lock);
                return;
        }
+       ion_handle_put_nolock(handle);
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+       BUG_ON(client != handle->client);
+
+       mutex_lock(&client->lock);
+       ion_free_nolock(client, handle);
        mutex_unlock(&client->lock);
-       ion_handle_put(handle);
 }
 EXPORT_SYMBOL(ion_free);
 
@@ -1283,11 +1308,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        {
                struct ion_handle *handle;
 
-               handle = ion_handle_get_by_id(client, data.handle.handle);
-               if (IS_ERR(handle))
+               mutex_lock(&client->lock);
+               handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+               if (IS_ERR(handle)) {
+                       mutex_unlock(&client->lock);
                        return PTR_ERR(handle);
-               ion_free(client, handle);
-               ion_handle_put(handle);
+               }
+               ion_free_nolock(client, handle);
+               ion_handle_put_nolock(handle);
+               mutex_unlock(&client->lock);
                break;
        }
        case ION_IOC_SHARE:
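
The hunk above is a standard locked-wrapper/_nolock split: the exported entry points keep taking client->lock themselves, while ION_IOC_FREE now performs lookup, free and put inside one critical section so that another thread cannot drop the last reference to the handle between those steps. A generic sketch of the same pattern, with hypothetical names (struct thing, thing_destroy and friends are not part of ion):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct thing {
        struct kref ref;
};

struct thing_owner {
        struct mutex lock;
        struct thing *single;   /* stand-in for ion's per-client idr */
};

static void thing_destroy(struct kref *ref)
{
        kfree(container_of(ref, struct thing, ref));
}

/* _nolock variant: the caller must already hold owner->lock. */
static void thing_put_nolock(struct thing *t)
{
        kref_put(&t->ref, thing_destroy);
}

/* Public wrapper: takes the lock itself, as ion_handle_put() still does. */
static void thing_put(struct thing_owner *owner, struct thing *t)
{
        mutex_lock(&owner->lock);
        thing_put_nolock(t);
        mutex_unlock(&owner->lock);
}

/* ioctl-style path: lookup and release share one critical section so the
 * object cannot disappear between the two steps. */
static int thing_release(struct thing_owner *owner)
{
        struct thing *t;

        mutex_lock(&owner->lock);
        t = owner->single;
        if (!t) {
                mutex_unlock(&owner->lock);
                return -EINVAL;
        }
        owner->single = NULL;
        thing_put_nolock(t);
        mutex_unlock(&owner->lock);
        return 0;
}
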
index 9156d8238c9797cd1d4c3c03365461e176772c2d..e702ce6461fc9b4a47f8c513c68e25fe80bdacc4 100644 (file)
@@ -167,7 +167,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);
 
-       carveout_heap->pool = gen_pool_create(12, -1);
+       carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
index fd7e23e0c06e9bcc47659e4d692819d6b61fe1a5..59ee2f8f67611ec8a92c2be94ec5712881bd2830 100644 (file)
@@ -30,6 +30,8 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 
        if (!page)
                return NULL;
+       ion_page_pool_alloc_set_cache_policy(pool, page);
+
        ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
                                                DMA_BIDIRECTIONAL);
        return page;
@@ -38,6 +40,7 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 static void ion_page_pool_free_pages(struct ion_page_pool *pool,
                                     struct page *page)
 {
+       ion_page_pool_free_set_cache_policy(pool, page);
        __free_pages(page, pool->order);
 }
 
@@ -103,6 +106,11 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
                ion_page_pool_free_pages(pool, page);
 }
 
+void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
+{
+       ion_page_pool_free_pages(pool, page);
+}
+
 static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 {
        int count = pool->low_count;
index 0239883bffb72a82c0ffb05bc9aaaf0ee52f32aa..6f59a2d36567a5dd64b3761f217f9e894cccd898 100644 (file)
@@ -26,6 +26,9 @@
 #include <linux/sched.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
 
 #include "ion.h"
 
@@ -381,6 +384,37 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
 struct page *ion_page_pool_alloc(struct ion_page_pool *);
 void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+                                                        struct page *page)
+{
+       void *va = page_address(page);
+
+       if (va)
+               set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+                                                       struct page *page)
+{
+       void *va = page_address(page);
+
+       if (va)
+               set_memory_wb((unsigned long)va, 1 << pool->order);
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy(struct ion_page_pool *pool,
+                                                        struct page *page)
+{
+}
+
+static inline void ion_page_pool_free_set_cache_policy(struct ion_page_pool *pool,
+                                                       struct page *page)
+{
+}
+#endif
+
 
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
  * @pool:              the pool
index d4c3e5512dd54dbcf0b3f6d9a927a1e8bbb3fc5a..57d115d0f1791f68ff2c277f97c63b6b69bfe6eb 100644 (file)
@@ -83,10 +83,12 @@ static void free_buffer_page(struct ion_system_heap *heap,
        unsigned int order = compound_order(page);
        bool cached = ion_buffer_cached(buffer);
 
-       if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
+       if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-
-               ion_page_pool_free(pool, page);
+               if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+                       ion_page_pool_free_immediate(pool, page);
+               else
+                       ion_page_pool_free(pool, page);
        } else {
                __free_pages(page, order);
        }
index e679d8432810bf8bad5cdb83ab8cb9abe59df503..af49af0cca01ba4231ee0fc564cf56987371959d 100644 (file)
@@ -43,6 +43,9 @@
 #include <linux/profile.h>
 #include <linux/notifier.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace/lowmemorykiller.h"
+
 static uint32_t lowmem_debug_level = 1;
 static short lowmem_adj[6] = {
        0,
@@ -84,6 +87,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
        int tasksize;
        int i;
        short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+       int minfree = 0;
        int selected_tasksize = 0;
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
@@ -97,8 +101,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
-               if (other_free < lowmem_minfree[i] &&
-                   other_file < lowmem_minfree[i]) {
+               minfree = lowmem_minfree[i];
+               if (other_free < minfree && other_file < minfree) {
                        min_score_adj = lowmem_adj[i];
                        break;
                }
@@ -153,10 +157,14 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
-               lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
-                            p->pid, p->comm, oom_score_adj, tasksize);
+               lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+                            p->comm, p->pid, oom_score_adj, tasksize);
        }
        if (selected) {
+               long cache_size = other_file * (long)(PAGE_SIZE / 1024);
+               long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
+               long free = other_free * (long)(PAGE_SIZE / 1024);
+
                task_lock(selected);
                send_sig(SIGKILL, selected, 0);
                /*
@@ -167,9 +175,18 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                if (selected->mm)
                        mark_oom_victim(selected);
                task_unlock(selected);
-               lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
-                            selected->pid, selected->comm,
-                            selected_oom_score_adj, selected_tasksize);
+               trace_lowmemory_kill(selected, cache_size, cache_limit, free);
+               lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+                               "   to free %ldkB on behalf of '%s' (%d) because\n" \
+                               "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+                               "   Free memory is %ldkB above reserved\n",
+                            selected->comm, selected->pid,
+                            selected_oom_score_adj,
+                            selected_tasksize * (long)(PAGE_SIZE / 1024),
+                            current->comm, current->pid,
+                            cache_size, cache_limit,
+                            min_score_adj,
+                            free);
                lowmem_deathpending_timeout = jiffies + HZ;
                rem += selected_tasksize;
        }
@@ -193,13 +210,97 @@ static int __init lowmem_init(void)
 }
 device_initcall(lowmem_init);
 
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+       if (oom_adj == OOM_ADJUST_MAX)
+               return OOM_SCORE_ADJ_MAX;
+       else
+               return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+       int i;
+       short oom_adj;
+       short oom_score_adj;
+       int array_size = ARRAY_SIZE(lowmem_adj);
+
+       if (lowmem_adj_size < array_size)
+               array_size = lowmem_adj_size;
+
+       if (array_size <= 0)
+               return;
+
+       oom_adj = lowmem_adj[array_size - 1];
+       if (oom_adj > OOM_ADJUST_MAX)
+               return;
+
+       oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+       if (oom_score_adj <= OOM_ADJUST_MAX)
+               return;
+
+       lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+       for (i = 0; i < array_size; i++) {
+               oom_adj = lowmem_adj[i];
+               oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+               lowmem_adj[i] = oom_score_adj;
+               lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+                            oom_adj, oom_score_adj);
+       }
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+       int ret;
+
+       ret = param_array_ops.set(val, kp);
+
+       /* HACK: Autodetect oom_adj values in lowmem_adj array */
+       lowmem_autodetect_oom_adj_values();
+
+       return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+       return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+       param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+       .set = lowmem_adj_array_set,
+       .get = lowmem_adj_array_get,
+       .free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+       .max = ARRAY_SIZE(lowmem_adj),
+       .num = &lowmem_adj_size,
+       .ops = &param_ops_short,
+       .elemsize = sizeof(lowmem_adj[0]),
+       .elem = lowmem_adj,
+};
+#endif
+
 /*
  * not really modular, but the easiest way to keep compat with existing
  * bootargs behaviour is to continue using module_param here.
  */
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+module_param_cb(adj, &lowmem_adj_array_ops,
+               .arr = &__param_arr_adj,
+               S_IRUGO | S_IWUSR);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
 module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
                         S_IRUGO | S_IWUSR);
+#endif
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
                         S_IRUGO | S_IWUSR);
 module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
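
lowmem_oom_adj_to_oom_score_adj() above scales the legacy oom_adj range (capped at OOM_ADJUST_MAX = 15, with OOM_DISABLE = -17) onto the oom_score_adj range (capped at OOM_SCORE_ADJ_MAX = 1000), and the autodetect hook rewrites the adj array in place when its values still look like old-style oom_adj numbers. A standalone, user-space sketch of the same arithmetic, just to make the mapping concrete:

#include <stdio.h>

#define OOM_DISABLE             (-17)
#define OOM_ADJUST_MAX          15
#define OOM_SCORE_ADJ_MAX       1000

/* Same scaling as lowmem_oom_adj_to_oom_score_adj() above. */
static short oom_adj_to_oom_score_adj(short oom_adj)
{
        if (oom_adj == OOM_ADJUST_MAX)
                return OOM_SCORE_ADJ_MAX;
        return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

int main(void)
{
        /* A sample array of old-style oom_adj values. */
        short adj[] = { 0, 1, 6, 12, 15 };
        int i;

        /* Prints 0, 58, 352, 705, 1000. */
        for (i = 0; i < 5; i++)
                printf("oom_adj %d -> oom_score_adj %d\n",
                       adj[i], oom_adj_to_oom_score_adj(adj[i]));
        return 0;
}

The special case for OOM_ADJUST_MAX maps the top of the old range exactly to 1000 rather than to the truncated 15 * 1000 / 17 = 882.
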
index f83e00c7805130e1ab6dda8d853c2b642b8a4f92..da101a506cd2612312f72b28cedc1e2f587c915f 100644 (file)
@@ -465,6 +465,13 @@ static bool android_fence_enable_signaling(struct fence *fence)
        return true;
 }
 
+static void android_fence_disable_signaling(struct fence *fence)
+{
+       struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+
+       list_del_init(&pt->active_list);
+}
+
 static int android_fence_fill_driver_data(struct fence *fence,
                                          void *data, int size)
 {
@@ -508,6 +515,7 @@ static const struct fence_ops android_fence_ops = {
        .get_driver_name = android_fence_get_driver_name,
        .get_timeline_name = android_fence_get_timeline_name,
        .enable_signaling = android_fence_enable_signaling,
+       .disable_signaling = android_fence_disable_signaling,
        .signaled = android_fence_signaled,
        .wait = fence_default_wait,
        .release = android_fence_release,
@@ -519,12 +527,10 @@ static const struct fence_ops android_fence_ops = {
 static void sync_fence_free(struct kref *kref)
 {
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
-       int i, status = atomic_read(&fence->status);
+       int i;
 
        for (i = 0; i < fence->num_fences; ++i) {
-               if (status)
-                       fence_remove_callback(fence->cbs[i].sync_pt,
-                                             &fence->cbs[i].cb);
+               fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
                fence_put(fence->cbs[i].sync_pt);
        }
 
diff --git a/drivers/staging/android/trace/lowmemorykiller.h b/drivers/staging/android/trace/lowmemorykiller.h
new file mode 100644 (file)
index 0000000..f43d3fa
--- /dev/null
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
+#define TRACE_SYSTEM lowmemorykiller
+
+#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOWMEMORYKILLER_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lowmemory_kill,
+       TP_PROTO(struct task_struct *killed_task, long cache_size, \
+                long cache_limit, long free),
+
+       TP_ARGS(killed_task, cache_size, cache_limit, free),
+
+       TP_STRUCT__entry(
+                       __array(char, comm, TASK_COMM_LEN)
+                       __field(pid_t, pid)
+                       __field(long, pagecache_size)
+                       __field(long, pagecache_limit)
+                       __field(long, free)
+       ),
+
+       TP_fast_assign(
+                       memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN);
+                       __entry->pid = killed_task->pid;
+                       __entry->pagecache_size = cache_size;
+                       __entry->pagecache_limit = cache_limit;
+                       __entry->free = free;
+       ),
+
+       TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldKb",
+               __entry->comm, __entry->pid, __entry->pagecache_size,
+               __entry->pagecache_limit, __entry->free)
+);
+
+
+#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index ba4743c71d6b45d6d42c0c9711609bbd773a34ba..13df42d200b7cc003e46cebd0e28b1997341fa87 100644 (file)
@@ -13,6 +13,7 @@
 #define _UAPI_LINUX_ASHMEM_H
 
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 #define ASHMEM_NAME_LEN                256
 
index 4e094602437c3dea1c36e7614f808d680a0b4aa1..c579141a7bed8a52059ca4a5331fa6c3d034ac2b 100644 (file)
@@ -4,6 +4,12 @@ config GOLDFISH_AUDIO
        ---help---
          Emulated audio channel for the Goldfish Android Virtual Device
 
+config GOLDFISH_SYNC
+    tristate "Goldfish AVD Sync Driver"
+    depends on GOLDFISH
+       ---help---
+         Emulated sync fences for the Goldfish Android Virtual Device
+
 config MTD_GOLDFISH_NAND
        tristate "Goldfish NAND device"
        depends on GOLDFISH
index dec34ad58162fdda2dde0969b0e004bc3dbfb01d..0cf525588210792e85f0034d3f5c181d24a8dce6 100644 (file)
@@ -4,3 +4,8 @@
 
 obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o
 obj-$(CONFIG_MTD_GOLDFISH_NAND)        += goldfish_nand.o
+
+# and sync
+
+ccflags-y := -Idrivers/staging/android
+obj-$(CONFIG_GOLDFISH_SYNC) += goldfish_sync.o
index b0927e49d0a81f31e63895fa2621ed61bfc30a57..63b79c09b41ba2231b7a4b4c3beb644659d3fad4 100644 (file)
@@ -26,7 +26,9 @@
 #include <linux/sched.h>
 #include <linux/dma-mapping.h>
 #include <linux/uaccess.h>
+#include <linux/slab.h>
 #include <linux/goldfish.h>
+#include <linux/acpi.h>
 
 MODULE_AUTHOR("Google, Inc.");
 MODULE_DESCRIPTION("Android QEMU Audio Driver");
@@ -115,6 +117,7 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
                                   size_t count, loff_t *pos)
 {
        struct goldfish_audio *data = fp->private_data;
+       unsigned long irq_flags;
        int length;
        int result = 0;
 
@@ -128,6 +131,10 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf,
                wait_event_interruptible(data->wait, data->buffer_status &
                                         AUDIO_INT_READ_BUFFER_FULL);
 
+               spin_lock_irqsave(&data->lock, irq_flags);
+               data->buffer_status &= ~AUDIO_INT_READ_BUFFER_FULL;
+               spin_unlock_irqrestore(&data->lock, irq_flags);
+
                length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE);
 
                /* copy data to user space */
@@ -344,11 +351,25 @@ static int goldfish_audio_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_audio_of_match[] = {
+       { .compatible = "google,goldfish-audio", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_audio_of_match);
+
+static const struct acpi_device_id goldfish_audio_acpi_match[] = {
+       { "GFSH0005", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match);
+
 static struct platform_driver goldfish_audio_driver = {
        .probe          = goldfish_audio_probe,
        .remove         = goldfish_audio_remove,
        .driver = {
-               .name = "goldfish_audio"
+               .name = "goldfish_audio",
+               .of_match_table = goldfish_audio_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match),
        }
 };
 
diff --git a/drivers/staging/goldfish/goldfish_sync.c b/drivers/staging/goldfish/goldfish_sync.c
new file mode 100644 (file)
index 0000000..ba8def2
--- /dev/null
@@ -0,0 +1,987 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
+
+#include <linux/string.h>
+#include <linux/syscalls.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#define ERR(...) printk(KERN_ERR __VA_ARGS__);
+
+#define INFO(...) printk(KERN_INFO __VA_ARGS__);
+
+#define DPRINT(...) pr_debug(__VA_ARGS__);
+
+#define DTRACE() DPRINT("%s: enter", __func__)
+
+/* The Goldfish sync driver is designed to provide an interface
+ * between the underlying host's sync device and the kernel's
+ * sw_sync.
+ * The purpose of the device/driver is to enable lightweight
+ * creation and signaling of timelines and fences
+ * in order to synchronize the guest with host-side graphics events.
+ *
+ * Each time the interrupt trips, the driver
+ * may perform a sw_sync operation.
+ */
+
+/* The operations are: */
+
+/* Ready signal - used to mark when irq should lower */
+#define CMD_SYNC_READY            0
+
+/* Create a new timeline. writes timeline handle */
+#define CMD_CREATE_SYNC_TIMELINE  1
+
+/* Create a fence object. reads timeline handle and time argument.
+ * Writes fence fd to the SYNC_REG_HANDLE register. */
+#define CMD_CREATE_SYNC_FENCE     2
+
+/* Increments timeline. reads timeline handle and time argument */
+#define CMD_SYNC_TIMELINE_INC     3
+
+/* Destroys a timeline. reads timeline handle */
+#define CMD_DESTROY_SYNC_TIMELINE 4
+
+/* Starts a wait on the host with
+ * the given glsync object and sync thread handle. */
+#define CMD_TRIGGER_HOST_WAIT     5
+
+/* The register layout is: */
+
+#define SYNC_REG_BATCH_COMMAND                0x00 /* host->guest batch commands */
+#define SYNC_REG_BATCH_GUESTCOMMAND           0x04 /* guest->host batch commands */
+#define SYNC_REG_BATCH_COMMAND_ADDR           0x08 /* communicate physical address of host->guest batch commands */
+#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH      0x0c /* 64-bit part */
+#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR      0x10 /* communicate physical address of guest->host commands */
+#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */
+#define SYNC_REG_INIT                         0x18 /* signals that the device has been probed */
+
+/* There is an ioctl associated with goldfish sync driver.
+ * Make it conflict with ioctls that are not likely to be used
+ * in the emulator.
+ *
+ * '@' 00-0F   linux/radeonfb.h        conflict!
+ * '@' 00-0F   drivers/video/aty/aty128fb.c    conflict!
+ */
+#define GOLDFISH_SYNC_IOC_MAGIC        '@'
+
+#define GOLDFISH_SYNC_IOC_QUEUE_WORK   _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)
+
+/* The above definitions (command codes, register layout, ioctl definitions)
+ * need to be in sync with the following files:
+ *
+ * Host-side (emulator):
+ * external/qemu/android/emulation/goldfish_sync.h
+ * external/qemu-android/hw/misc/goldfish_sync.c
+ *
+ * Guest-side (system image):
+ * device/generic/goldfish-opengl/system/egl/goldfish_sync.h
+ * device/generic/goldfish/ueventd.ranchu.rc
+ * platform/build/target/board/generic/sepolicy/file_contexts
+ */
+struct goldfish_sync_hostcmd {
+       /* sorted for alignment */
+       uint64_t handle;
+       uint64_t hostcmd_handle;
+       uint32_t cmd;
+       uint32_t time_arg;
+};
+
+struct goldfish_sync_guestcmd {
+       uint64_t host_command; /* uint64_t for alignment */
+       uint64_t glsync_handle;
+       uint64_t thread_handle;
+       uint64_t guest_timeline_handle;
+};
+
+#define GOLDFISH_SYNC_MAX_CMDS 64
+
+struct goldfish_sync_state {
+       char __iomem *reg_base;
+       int irq;
+
+       /* Spinlock protects |to_do| / |to_do_end|. */
+       spinlock_t lock;
+       /* |mutex_lock| protects all concurrent access
+        * to timelines for both kernel and user space. */
+       struct mutex mutex_lock;
+
+       /* Buffer holding commands issued from host. */
+       struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS];
+       uint32_t to_do_end;
+
+       /* Addresses for the reading or writing
+        * of individual commands. The host can directly write
+        * to |batch_hostcmd| (and then this driver immediately
+        * copies contents to |to_do|). This driver either replies
+        * through |batch_hostcmd| or simply issues a
+        * guest->host command through |batch_guestcmd|.
+        */
+       struct goldfish_sync_hostcmd *batch_hostcmd;
+       struct goldfish_sync_guestcmd *batch_guestcmd;
+
+       /* Used to give this struct itself to a work queue
+        * function for executing actual sync commands. */
+       struct work_struct work_item;
+};
+
+static struct goldfish_sync_state global_sync_state[1];
+
+struct goldfish_sync_timeline_obj {
+       struct sw_sync_timeline *sw_sync_tl;
+       uint32_t current_time;
+       /* We need to be careful about when we deallocate
+        * this |goldfish_sync_timeline_obj| struct.
+        * In order to ensure proper cleanup, we need to
+        * consider the triggered host-side wait that may
+        * still be in flight when the guest close()'s a
+        * goldfish_sync device's sync context fd (and
+        * destroys the |sw_sync_tl| field above).
+        * The host-side wait may raise IRQ
+        * and tell the kernel to increment the timeline _after_
+        * the |sw_sync_tl| has already been set to null.
+        *
+        * From observations on OpenGL apps and CTS tests, this
+        * happens at some very low probability upon context
+        * destruction or process close, but it does happen
+        * and it needs to be handled properly. Otherwise,
+        * if we clean up the surrounding |goldfish_sync_timeline_obj|
+        * too early, any |handle| field of any host->guest command
+        * might not even point to a null |sw_sync_tl| field,
+        * but to garbage memory or even a reclaimed |sw_sync_tl|.
+        * If we do not count such "pending waits" and kfree the object
+        * immediately upon |goldfish_sync_timeline_destroy|,
+        * we might get mysterious RCU stalls after running a long
+        * time because the garbage memory that is being read
+        * happens to be interpretable as a |spinlock_t| struct
+        * that is currently in the locked state.
+        *
+        * To track when to free the |goldfish_sync_timeline_obj|
+        * itself, we maintain a kref.
+        * The kref essentially counts the timeline itself plus
+        * the number of waits in flight. kref_init/kref_put
+        * are issued on
+        * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy|
+        * and kref_get/kref_put are issued on
+        * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|.
+        *
+        * The timeline is destroyed after reference count
+        * reaches zero, which would happen after
+        * |goldfish_sync_timeline_destroy| and all pending
+        * |goldfish_sync_timeline_inc|'s are fulfilled.
+        *
+        * NOTE (1): We assume that |fence_create| and
+        * |timeline_inc| calls are 1:1, otherwise the kref scheme
+        * will not work. This is a valid assumption as long
+        * as the host-side virtual device implementation
+        * does not insert any timeline increments
+        * that we did not trigger from here.
+        *
+        * NOTE (2): The use of kref by itself requires no locks,
+        * but this does not mean everything works without locks.
+        * Related timeline operations do require a lock of some sort,
+        * or at least are not proven to work without it.
+        * In particular, we assume that all the operations
+        * done on the |kref| field above are done in contexts where
+        * |global_sync_state->mutex_lock| is held. Do not
+        * remove that lock until everything is proven to work
+        * without it!!! */
+       struct kref kref;
+};
+
+/* We will call |delete_timeline_obj| when the last reference count
+ * of the kref is decremented. This deletes the sw_sync
+ * timeline object along with the wrapper itself. */
+static void delete_timeline_obj(struct kref *kref)
+{
+       struct goldfish_sync_timeline_obj* obj =
+               container_of(kref, struct goldfish_sync_timeline_obj, kref);
+
+       sync_timeline_destroy(&obj->sw_sync_tl->obj);
+       obj->sw_sync_tl = NULL;
+       kfree(obj);
+}
+
+static uint64_t gensym_ctr;
+static void gensym(char *dst)
+{
+       sprintf(dst, "goldfish_sync:gensym:%llu", gensym_ctr);
+       gensym_ctr++;
+}
+
+/* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock|
+ * is held. */
+static struct goldfish_sync_timeline_obj*
+goldfish_sync_timeline_create(void)
+{
+
+       char timeline_name[256];
+       struct sw_sync_timeline *res_sync_tl = NULL;
+       struct goldfish_sync_timeline_obj *res;
+
+       DTRACE();
+
+       gensym(timeline_name);
+
+       res_sync_tl = sw_sync_timeline_create(timeline_name);
+       if (!res_sync_tl) {
+               ERR("Failed to create sw_sync timeline.");
+               return NULL;
+       }
+
+       res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL);
+       res->sw_sync_tl = res_sync_tl;
+       res->current_time = 0;
+       kref_init(&res->kref);
+
+       DPRINT("new timeline_obj=0x%p", res);
+       return res;
+}
+
+/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock|
+ * is held. */
+static int
+goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj,
+                                                       uint32_t val)
+{
+
+       int fd;
+       char fence_name[256];
+       struct sync_pt *syncpt = NULL;
+       struct sync_fence *sync_obj = NULL;
+       struct sw_sync_timeline *tl;
+
+       DTRACE();
+
+       if (!obj) return -1;
+
+       tl = obj->sw_sync_tl;
+
+       syncpt = sw_sync_pt_create(tl, val);
+       if (!syncpt) {
+               ERR("could not create sync point! "
+                       "sync_timeline=0x%p val=%d",
+                          tl, val);
+               return -1;
+       }
+
+       fd = get_unused_fd_flags(O_CLOEXEC);
+       if (fd < 0) {
+               ERR("could not get unused fd for sync fence. "
+                       "errno=%d", fd);
+               goto err_cleanup_pt;
+       }
+
+       gensym(fence_name);
+
+       sync_obj = sync_fence_create(fence_name, syncpt);
+       if (!sync_obj) {
+               ERR("could not create sync fence! "
+                       "sync_timeline=0x%p val=%d sync_pt=0x%p",
+                          tl, val, syncpt);
+               goto err_cleanup_fd_pt;
+       }
+
+       DPRINT("installing sync fence into fd %d sync_obj=0x%p", fd, sync_obj);
+       sync_fence_install(sync_obj, fd);
+       kref_get(&obj->kref);
+
+       return fd;
+
+err_cleanup_fd_pt:
+       put_unused_fd(fd);
+err_cleanup_pt:
+       sync_pt_free(syncpt);
+       return -1;
+}
+
+/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock|
+ * is held. */
+static void
+goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc)
+{
+       DTRACE();
+       /* Just give up if someone else nuked the timeline.
+        * Whoever it was won't care that it doesn't get signaled. */
+       if (!obj) return;
+
+       DPRINT("timeline_obj=0x%p", obj);
+       sw_sync_timeline_inc(obj->sw_sync_tl, inc);
+       DPRINT("incremented timeline. increment max_time");
+       obj->current_time += inc;
+
+       /* Here, we will end up deleting the timeline object if it
+        * turns out that this call was a pending increment after
+        * |goldfish_sync_timeline_destroy| was called. */
+       kref_put(&obj->kref, delete_timeline_obj);
+       DPRINT("done");
+}
+
+/* |goldfish_sync_timeline_destroy| assumes
+ * that |global_sync_state->mutex_lock| is held. */
+static void
+goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj)
+{
+       DTRACE();
+       /* See description of |goldfish_sync_timeline_obj| for why we
+        * should not immediately destroy |obj| */
+       kref_put(&obj->kref, delete_timeline_obj);
+}
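
Putting the reference-counting scheme from the goldfish_sync_timeline_obj comment together: the kref holds one count for the timeline being alive plus one per host-side wait still in flight, so the object survives any CMD_SYNC_TIMELINE_INC that arrives after the guest has already torn down its context. A worked trace of the counts, assuming every call succeeds and is made with global_sync_state->mutex_lock held as the driver requires (a sketch, not additional driver code):

static void example_timeline_lifetime(void)
{
        struct goldfish_sync_timeline_obj *obj;

        obj = goldfish_sync_timeline_create();  /* kref = 1: timeline alive         */
        goldfish_sync_fence_create(obj, 1);     /* kref = 2: one host wait pending  */
        goldfish_sync_fence_create(obj, 2);     /* kref = 3: two host waits pending */
        goldfish_sync_timeline_inc(obj, 1);     /* kref = 2: first wait retired     */
        goldfish_sync_timeline_destroy(obj);    /* kref = 1: creation ref dropped   */
        goldfish_sync_timeline_inc(obj, 1);     /* kref = 0: delete_timeline_obj()  */
}

The final increment is exactly the "pending wait" case the comment describes: it still finds a valid object and only then releases the last reference, at which point the sw_sync timeline and the wrapper are freed together.
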
+
+static inline void
+goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state,
+                                               uint32_t cmd,
+                                               uint64_t handle,
+                                               uint32_t time_arg,
+                                               uint64_t hostcmd_handle)
+{
+       struct goldfish_sync_hostcmd *to_add;
+
+       DTRACE();
+
+       BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS);
+
+       to_add = &sync_state->to_do[sync_state->to_do_end];
+
+       to_add->cmd = cmd;
+       to_add->handle = handle;
+       to_add->time_arg = time_arg;
+       to_add->hostcmd_handle = hostcmd_handle;
+
+       sync_state->to_do_end += 1;
+}
+
+static inline void
+goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state,
+                                                       uint32_t cmd,
+                                                       uint64_t handle,
+                                                       uint32_t time_arg,
+                                                       uint64_t hostcmd_handle)
+{
+       unsigned long irq_flags;
+       struct goldfish_sync_hostcmd *batch_hostcmd =
+               sync_state->batch_hostcmd;
+
+       DTRACE();
+
+       spin_lock_irqsave(&sync_state->lock, irq_flags);
+
+       batch_hostcmd->cmd = cmd;
+       batch_hostcmd->handle = handle;
+       batch_hostcmd->time_arg = time_arg;
+       batch_hostcmd->hostcmd_handle = hostcmd_handle;
+       writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+
+       spin_unlock_irqrestore(&sync_state->lock, irq_flags);
+}
+
+static inline void
+goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state,
+                                                       uint32_t cmd,
+                                                       uint64_t glsync_handle,
+                                                       uint64_t thread_handle,
+                                                       uint64_t timeline_handle)
+{
+       unsigned long irq_flags;
+       struct goldfish_sync_guestcmd *batch_guestcmd =
+               sync_state->batch_guestcmd;
+
+       DTRACE();
+
+       spin_lock_irqsave(&sync_state->lock, irq_flags);
+
+       batch_guestcmd->host_command = (uint64_t)cmd;
+       batch_guestcmd->glsync_handle = (uint64_t)glsync_handle;
+       batch_guestcmd->thread_handle = (uint64_t)thread_handle;
+       batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle;
+       writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND);
+
+       spin_unlock_irqrestore(&sync_state->lock, irq_flags);
+}
+
+/* |goldfish_sync_interrupt| handles IRQ raises from the virtual device.
+ * In the context of OpenGL, this interrupt will fire whenever we need
+ * to signal a fence fd in the guest, with the command
+ * |CMD_SYNC_TIMELINE_INC|.
+ * However, because this function will be called in an interrupt context,
+ * it is necessary to do the actual work of signaling off of interrupt context.
+ * The shared work queue is used for this purpose. At the end when
+ * all pending commands are intercepted by the interrupt handler,
+ * we call |schedule_work|, which will later run the actual
+ * desired sync command in |goldfish_sync_work_item_fn|.
+ */
+static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id)
+{
+
+       struct goldfish_sync_state *sync_state = dev_id;
+
+       uint32_t nextcmd;
+       uint32_t command_r;
+       uint64_t handle_rw;
+       uint32_t time_r;
+       uint64_t hostcmd_handle_rw;
+
+       int count = 0;
+
+       DTRACE();
+
+       sync_state = dev_id;
+
+       spin_lock(&sync_state->lock);
+
+       for (;;) {
+
+               readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND);
+               nextcmd = sync_state->batch_hostcmd->cmd;
+
+               if (nextcmd == 0)
+                       break;
+
+               command_r = nextcmd;
+               handle_rw = sync_state->batch_hostcmd->handle;
+               time_r = sync_state->batch_hostcmd->time_arg;
+               hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle;
+
+               goldfish_sync_cmd_queue(
+                               sync_state,
+                               command_r,
+                               handle_rw,
+                               time_r,
+                               hostcmd_handle_rw);
+
+               count++;
+       }
+
+       spin_unlock(&sync_state->lock);
+
+       schedule_work(&sync_state->work_item);
+
+       return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* |goldfish_sync_work_item_fn| does the actual work of servicing
+ * host->guest sync commands. This function is triggered whenever
+ * the IRQ for the goldfish sync device is raised. Once it starts
+ * running, it grabs the contents of the buffer containing the
+ * commands it needs to execute (there may be multiple, because
+ * our IRQ is active high and not edge triggered), and then
+ * runs all of them one after the other.
+ */
+static void goldfish_sync_work_item_fn(struct work_struct *input)
+{
+
+       struct goldfish_sync_state *sync_state;
+       int sync_fence_fd;
+
+       struct goldfish_sync_timeline_obj *timeline;
+       uint64_t timeline_ptr;
+
+       uint64_t hostcmd_handle;
+
+       uint32_t cmd;
+       uint64_t handle;
+       uint32_t time_arg;
+
+       struct goldfish_sync_hostcmd *todo;
+       uint32_t todo_end;
+
+       unsigned long irq_flags;
+
+       struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS];
+       uint32_t i = 0;
+
+       sync_state = container_of(input, struct goldfish_sync_state, work_item);
+
+       mutex_lock(&sync_state->mutex_lock);
+
+       spin_lock_irqsave(&sync_state->lock, irq_flags); {
+
+               todo_end = sync_state->to_do_end;
+
+               DPRINT("num sync todos: %u", sync_state->to_do_end);
+
+               for (i = 0; i < todo_end; i++)
+                       to_run[i] = sync_state->to_do[i];
+
+               /* We expect that commands will come in at a slow enough rate
+                * so that incoming items will not be more than
+                * GOLDFISH_SYNC_MAX_CMDS.
+                *
+                * This is because the way the sync device is used,
+                * it's only for managing buffer data transfers per frame,
+                * with a sequential dependency between putting things in
+                * to_do and taking them out. Once a set of commands is
+                * queued up in to_do, the user of the device waits for
+                * them to be processed before queuing additional commands,
+                * which limits the rate at which commands come in
+                * to the rate at which we take them out here.
+                *
+                * We also don't expect more than MAX_CMDS to be issued
+                * at once; there is a correspondence between
+                * which buffers need swapping to the (display / buffer queue)
+                * to particular commands, and we don't expect there to be
+                * enough display or buffer queues in operation at once
+                * to overrun GOLDFISH_SYNC_MAX_CMDS.
+                */
+               sync_state->to_do_end = 0;
+
+       } spin_unlock_irqrestore(&sync_state->lock, irq_flags);
+
+       for (i = 0; i < todo_end; i++) {
+               DPRINT("todo index: %u", i);
+
+               todo = &to_run[i];
+
+               cmd = todo->cmd;
+
+               handle = (uint64_t)todo->handle;
+               time_arg = todo->time_arg;
+               hostcmd_handle = (uint64_t)todo->hostcmd_handle;
+
+               DTRACE();
+
+               timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle;
+
+               switch (cmd) {
+               case CMD_SYNC_READY:
+                       break;
+               case CMD_CREATE_SYNC_TIMELINE:
+                       DPRINT("exec CMD_CREATE_SYNC_TIMELINE: "
+                                       "handle=0x%llx time_arg=%d",
+                                       handle, time_arg);
+                       timeline = goldfish_sync_timeline_create();
+                       timeline_ptr = (uintptr_t)timeline;
+                       goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE,
+                                                                               timeline_ptr,
+                                                                               0,
+                                                                               hostcmd_handle);
+                       DPRINT("sync timeline created: %p", timeline);
+                       break;
+               case CMD_CREATE_SYNC_FENCE:
+                       DPRINT("exec CMD_CREATE_SYNC_FENCE: "
+                                       "handle=0x%llx time_arg=%d",
+                                       handle, time_arg);
+                       sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg);
+                       goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE,
+                                                                               sync_fence_fd,
+                                                                               0,
+                                                                               hostcmd_handle);
+                       break;
+               case CMD_SYNC_TIMELINE_INC:
+                       DPRINT("exec CMD_SYNC_TIMELINE_INC: "
+                                       "handle=0x%llx time_arg=%d",
+                                       handle, time_arg);
+                       goldfish_sync_timeline_inc(timeline, time_arg);
+                       break;
+               case CMD_DESTROY_SYNC_TIMELINE:
+                       DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: "
+                                       "handle=0x%llx time_arg=%d",
+                                       handle, time_arg);
+                       goldfish_sync_timeline_destroy(timeline);
+                       break;
+               }
+               DPRINT("Done executing sync command");
+       }
+       mutex_unlock(&sync_state->mutex_lock);
+}
+
+/* Guest-side interface: file operations */
+
+/* Goldfish sync context and ioctl info.
+ *
+ * When the goldfish sync device is open()-ed, we create a sync context
+ * (|goldfish_sync_context|).
+ *
+ * Currently, the only data we need to track is the sync timeline itself
+ * along with its current time, both of which are packed up in the
+ * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context|
+ * as the filp->private_data.
+ *
+ * Next, when a sync context user requests that work be queued and a fence
+ * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds
+ * information about which host handles to touch for this particular
+ * queue-work operation. We need to know about the host-side sync thread
+ * and the particular host-side GLsync object. We also possibly write out
+ * a file descriptor.
+ */
+struct goldfish_sync_context {
+       struct goldfish_sync_timeline_obj *timeline;
+};
+
+struct goldfish_sync_ioctl_info {
+       uint64_t host_glsync_handle_in;
+       uint64_t host_syncthread_handle_in;
+       int fence_fd_out;
+};
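+
+/* Illustrative sketch only (not part of the driver): a guest-side user of
+ * this device would typically open the misc device node registered below
+ * (usually /dev/goldfish_sync), fill in the two host handles it obtained
+ * from the host-side GL stack, and issue the QUEUE_WORK ioctl. The handle
+ * values and the fence-waiting helper below are placeholders:
+ *
+ *     struct goldfish_sync_ioctl_info info = {
+ *             .host_glsync_handle_in = glsync_handle,
+ *             .host_syncthread_handle_in = syncthread_handle,
+ *     };
+ *     int fd = open("/dev/goldfish_sync", O_RDWR);
+ *     if (ioctl(fd, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) == 0)
+ *             wait_on_fence_fd(info.fence_fd_out);   // hypothetical helper
+ */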
+
+static int goldfish_sync_open(struct inode *inode, struct file *file)
+{
+
+       struct goldfish_sync_context *sync_context;
+
+       DTRACE();
+
+       mutex_lock(&global_sync_state->mutex_lock);
+
+       sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL);
+
+       if (sync_context == NULL) {
+               ERR("Creation of goldfish sync context failed!");
+               mutex_unlock(&global_sync_state->mutex_lock);
+               return -ENOMEM;
+       }
+
+       sync_context->timeline = NULL;
+
+       file->private_data = sync_context;
+
+       DPRINT("successfully created a sync context @0x%p", sync_context);
+
+       mutex_unlock(&global_sync_state->mutex_lock);
+
+       return 0;
+}
+
+static int goldfish_sync_release(struct inode *inode, struct file *file)
+{
+
+       struct goldfish_sync_context *sync_context;
+
+       DTRACE();
+
+       mutex_lock(&global_sync_state->mutex_lock);
+
+       sync_context = file->private_data;
+
+       if (sync_context->timeline)
+               goldfish_sync_timeline_destroy(sync_context->timeline);
+
+       sync_context->timeline = NULL;
+
+       kfree(sync_context);
+
+       mutex_unlock(&global_sync_state->mutex_lock);
+
+       return 0;
+}
+
+/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync
+ * and is used in conjunction with eglCreateSyncKHR to queue up the
+ * actual work of waiting for the EGL sync command to complete,
+ * possibly returning a fence fd to the guest.
+ */
+static long goldfish_sync_ioctl(struct file *file,
+                                                               unsigned int cmd,
+                                                               unsigned long arg)
+{
+       struct goldfish_sync_context *sync_context_data;
+       struct goldfish_sync_timeline_obj *timeline;
+       int fd_out;
+       struct goldfish_sync_ioctl_info ioctl_data;
+
+       DTRACE();
+
+       sync_context_data = file->private_data;
+       fd_out = -1;
+
+       switch (cmd) {
+       case GOLDFISH_SYNC_IOC_QUEUE_WORK:
+
+               DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK");
+
+               mutex_lock(&global_sync_state->mutex_lock);
+
+               if (copy_from_user(&ioctl_data,
+                                               (void __user *)arg,
+                                               sizeof(ioctl_data))) {
+                       ERR("Failed to copy memory for ioctl_data from user.");
+                       mutex_unlock(&global_sync_state->mutex_lock);
+                       return -EFAULT;
+               }
+
+               if (ioctl_data.host_syncthread_handle_in == 0) {
+                       DPRINT("Error: zero host syncthread handle!!!");
+                       mutex_unlock(&global_sync_state->mutex_lock);
+                       return -EFAULT;
+               }
+
+               if (!sync_context_data->timeline) {
+                       DPRINT("no timeline yet, create one.");
+                       sync_context_data->timeline = goldfish_sync_timeline_create();
+                       DPRINT("timeline: 0x%p", sync_context_data->timeline);
+               }
+
+               timeline = sync_context_data->timeline;
+               fd_out = goldfish_sync_fence_create(timeline,
+                                                                                       timeline->current_time + 1);
+               DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)",
+                          fd_out,
+                          sync_context_data->timeline->current_time + 1,
+                          sync_context_data->timeline);
+
+               ioctl_data.fence_fd_out = fd_out;
+
+               if (copy_to_user((void __user *)arg,
+                                               &ioctl_data,
+                                               sizeof(ioctl_data))) {
+                       DPRINT("Error, could not copy to user!!!");
+
+                       sys_close(fd_out);
+                       /* We won't be doing an increment, kref_put immediately. */
+                       kref_put(&timeline->kref, delete_timeline_obj);
+                       mutex_unlock(&global_sync_state->mutex_lock);
+                       return -EFAULT;
+               }
+
+               /* We are now about to trigger a host-side wait;
+                * accumulate on |pending_waits|. */
+               goldfish_sync_send_guestcmd(global_sync_state,
+                               CMD_TRIGGER_HOST_WAIT,
+                               ioctl_data.host_glsync_handle_in,
+                               ioctl_data.host_syncthread_handle_in,
+                               (uint64_t)(uintptr_t)(sync_context_data->timeline));
+
+               mutex_unlock(&global_sync_state->mutex_lock);
+               return 0;
+       default:
+               return -ENOTTY;
+       }
+}
+
+static const struct file_operations goldfish_sync_fops = {
+       .owner = THIS_MODULE,
+       .open = goldfish_sync_open,
+       .release = goldfish_sync_release,
+       .unlocked_ioctl = goldfish_sync_ioctl,
+       .compat_ioctl = goldfish_sync_ioctl,
+};
+
+static struct miscdevice goldfish_sync_device = {
+       .name = "goldfish_sync",
+       .fops = &goldfish_sync_fops,
+};
+
+
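+/* Hand the device the location of a batch command buffer: write the
+ * buffer's physical address as two 32-bit halves (low word, then high
+ * word) into the given register pair, then read both halves back and
+ * fail if the device did not latch the same 64-bit address.
+ */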
+static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state,
+                                                                               void *batch_addr,
+                                                                               uint32_t addr_offset,
+                                                                               uint32_t addr_offset_high)
+{
+       uint64_t batch_addr_phys;
+       uint32_t batch_addr_phys_test_lo;
+       uint32_t batch_addr_phys_test_hi;
+
+       if (!batch_addr) {
+               ERR("Could not use batch command address!");
+               return false;
+       }
+
+       batch_addr_phys = virt_to_phys(batch_addr);
+       writel((uint32_t)(batch_addr_phys),
+                       sync_state->reg_base + addr_offset);
+       writel((uint32_t)(batch_addr_phys >> 32),
+                       sync_state->reg_base + addr_offset_high);
+
+       batch_addr_phys_test_lo =
+               readl(sync_state->reg_base + addr_offset);
+       batch_addr_phys_test_hi =
+               readl(sync_state->reg_base + addr_offset_high);
+
+       if (virt_to_phys(batch_addr) !=
+                       (((uint64_t)batch_addr_phys_test_hi << 32) |
+                        batch_addr_phys_test_lo)) {
+               ERR("Invalid batch command address!");
+               return false;
+       }
+
+       return true;
+}
+
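+/* Probe sketch: map the device registers, hook up the IRQ and work item,
+ * register the goldfish_sync misc device, allocate the batch command
+ * buffers and hand their addresses to the device, and finally write
+ * SYNC_REG_INIT (presumably signalling the host that the driver is
+ * ready).
+ */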
+int goldfish_sync_probe(struct platform_device *pdev)
+{
+       struct resource *ioresource;
+       struct goldfish_sync_state *sync_state = global_sync_state;
+       int status;
+
+       DTRACE();
+
+       sync_state->to_do_end = 0;
+
+       spin_lock_init(&sync_state->lock);
+       mutex_init(&sync_state->mutex_lock);
+
+       platform_set_drvdata(pdev, sync_state);
+
+       ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (ioresource == NULL) {
+               ERR("platform_get_resource failed");
+               return -ENODEV;
+       }
+
+       sync_state->reg_base = devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE);
+       if (sync_state->reg_base == NULL) {
+               ERR("Could not ioremap");
+               return -ENOMEM;
+       }
+
+       sync_state->irq = platform_get_irq(pdev, 0);
+       if (sync_state->irq < 0) {
+               ERR("Could not platform_get_irq");
+               return -ENODEV;
+       }
+
+       status = devm_request_irq(&pdev->dev,
+                                                       sync_state->irq,
+                                                       goldfish_sync_interrupt,
+                                                       IRQF_SHARED,
+                                                       pdev->name,
+                                                       sync_state);
+       if (status) {
+               ERR("request_irq failed");
+               return -ENODEV;
+       }
+
+       INIT_WORK(&sync_state->work_item,
+                         goldfish_sync_work_item_fn);
+
+       status = misc_register(&goldfish_sync_device);
+       if (status) {
+               ERR("misc_register failed");
+               return status;
+       }
+
+       /* Obtain addresses for batch send/recv of commands. */
+       {
+               struct goldfish_sync_hostcmd *batch_addr_hostcmd;
+               struct goldfish_sync_guestcmd *batch_addr_guestcmd;
+
+               batch_addr_hostcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd),
+                               GFP_KERNEL);
+               batch_addr_guestcmd = devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd),
+                               GFP_KERNEL);
+
+               if (!setup_verify_batch_cmd_addr(sync_state,
+                                       batch_addr_hostcmd,
+                                       SYNC_REG_BATCH_COMMAND_ADDR,
+                                       SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) {
+                       ERR("goldfish_sync: Could not setup batch command address");
+                       return -ENODEV;
+               }
+
+               if (!setup_verify_batch_cmd_addr(sync_state,
+                                       batch_addr_guestcmd,
+                                       SYNC_REG_BATCH_GUESTCOMMAND_ADDR,
+                                       SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) {
+                       ERR("goldfish_sync: Could not setup batch guest command address");
+                       return -ENODEV;
+               }
+
+               sync_state->batch_hostcmd = batch_addr_hostcmd;
+               sync_state->batch_guestcmd = batch_addr_guestcmd;
+       }
+
+       INFO("goldfish_sync: Initialized goldfish sync device");
+
+       writel(0, sync_state->reg_base + SYNC_REG_INIT);
+
+       return 0;
+}
+
+static int goldfish_sync_remove(struct platform_device *pdev)
+{
+       struct goldfish_sync_state *sync_state = global_sync_state;
+
+       DTRACE();
+
+       misc_deregister(&goldfish_sync_device);
+       memset(sync_state, 0, sizeof(struct goldfish_sync_state));
+       return 0;
+}
+
+static const struct of_device_id goldfish_sync_of_match[] = {
+       { .compatible = "google,goldfish-sync", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_sync_of_match);
+
+static const struct acpi_device_id goldfish_sync_acpi_match[] = {
+       { "GFSH0006", 0 },
+       { },
+};
+
+MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match);
+
+static struct platform_driver goldfish_sync = {
+       .probe = goldfish_sync_probe,
+       .remove = goldfish_sync_remove,
+       .driver = {
+               .name = "goldfish_sync",
+               .of_match_table = goldfish_sync_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match),
+       }
+};
+
+module_platform_driver(goldfish_sync);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_DESCRIPTION("Android QEMU Sync Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+/* This function is only to run a basic test of sync framework.
+ * It creates a timeline and fence object whose signal point is at 1.
+ * The timeline is incremented, and we use the sync framework's
+ * sync_fence_wait on that fence object. If everything works out,
+ * we should not hang in the wait and return immediately.
+ * There is no way to explicitly run this test yet, but it
+ * can be used by inserting it at the end of goldfish_sync_probe.
+ */
+void test_kernel_sync(void)
+{
+       struct goldfish_sync_timeline_obj *test_timeline;
+       int test_fence_fd;
+
+       DTRACE();
+
+       DPRINT("test sw_sync");
+
+       test_timeline = goldfish_sync_timeline_create();
+       DPRINT("sw_sync_timeline_create -> 0x%p", test_timeline);
+
+       test_fence_fd = goldfish_sync_fence_create(test_timeline, 1);
+       DPRINT("sync_fence_create -> %d", test_fence_fd);
+
+       DPRINT("incrementing test timeline");
+       goldfish_sync_timeline_inc(test_timeline, 1);
+
+       DPRINT("test waiting (should NOT hang)");
+       sync_fence_wait(
+                       sync_fence_fdget(test_fence_fd), -1);
+
+       DPRINT("test waiting (afterward)");
+}
index 0f82c0b146f6d8eb0bdea4a92e4c8a4d72dd3332..1e332855b93388ded9051ec74f644fc3bd8d8857 100644 (file)
@@ -68,8 +68,7 @@ static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
 
 static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
 {
-       struct platform_device *pdev = dev_id;
-       struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
+       struct goldfish_tty *qtty = dev_id;
        void __iomem *base = qtty->base;
        unsigned long irq_flags;
        unsigned char *buf;
@@ -233,6 +232,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
        struct device *ttydev;
        void __iomem *base;
        u32 irq;
+       unsigned int line;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL)
@@ -248,10 +248,16 @@ static int goldfish_tty_probe(struct platform_device *pdev)
 
        irq = r->start;
 
-       if (pdev->id >= goldfish_tty_line_count)
-               goto err_unmap;
-
        mutex_lock(&goldfish_tty_lock);
+
+       if (pdev->id == PLATFORM_DEVID_NONE)
+               line = goldfish_tty_current_line_count;
+       else
+               line = pdev->id;
+
+       if (line >= goldfish_tty_line_count)
+               goto err_create_driver_failed;
+
        if (goldfish_tty_current_line_count == 0) {
                ret = goldfish_tty_create_driver();
                if (ret)
@@ -259,7 +265,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
        }
        goldfish_tty_current_line_count++;
 
-       qtty = &goldfish_ttys[pdev->id];
+       qtty = &goldfish_ttys[line];
        spin_lock_init(&qtty->lock);
        tty_port_init(&qtty->port);
        qtty->port.ops = &goldfish_port_ops;
@@ -269,13 +275,13 @@ static int goldfish_tty_probe(struct platform_device *pdev)
        writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
 
        ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED,
-                                               "goldfish_tty", pdev);
+                                               "goldfish_tty", qtty);
        if (ret)
                goto err_request_irq_failed;
 
 
        ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
-                                                       pdev->id, &pdev->dev);
+                                                       line, &pdev->dev);
        if (IS_ERR(ttydev)) {
                ret = PTR_ERR(ttydev);
                goto err_tty_register_device_failed;
@@ -286,8 +292,9 @@ static int goldfish_tty_probe(struct platform_device *pdev)
        qtty->console.device = goldfish_tty_console_device;
        qtty->console.setup = goldfish_tty_console_setup;
        qtty->console.flags = CON_PRINTBUFFER;
-       qtty->console.index = pdev->id;
+       qtty->console.index = line;
        register_console(&qtty->console);
+       platform_set_drvdata(pdev, qtty);
 
        mutex_unlock(&goldfish_tty_lock);
        return 0;
@@ -307,13 +314,12 @@ err_unmap:
 
 static int goldfish_tty_remove(struct platform_device *pdev)
 {
-       struct goldfish_tty *qtty;
+       struct goldfish_tty *qtty = platform_get_drvdata(pdev);
 
        mutex_lock(&goldfish_tty_lock);
 
-       qtty = &goldfish_ttys[pdev->id];
        unregister_console(&qtty->console);
-       tty_unregister_device(goldfish_tty_driver, pdev->id);
+       tty_unregister_device(goldfish_tty_driver, qtty->console.index);
        iounmap(qtty->base);
        qtty->base = NULL;
        free_irq(qtty->irq, pdev);
@@ -324,11 +330,19 @@ static int goldfish_tty_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_tty_of_match[] = {
+       { .compatible = "google,goldfish-tty", },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
+
 static struct platform_driver goldfish_tty_platform_driver = {
        .probe = goldfish_tty_probe,
        .remove = goldfish_tty_remove,
        .driver = {
-               .name = "goldfish_tty"
+               .name = "goldfish_tty",
+               .of_match_table = goldfish_tty_of_match,
        }
 };
 
index def5199ca004d1f3b7fa70057b465871a8ef5bb5..62fe36858a75d989ff2fa431c64a96aa850f549c 100644 (file)
@@ -95,6 +95,9 @@ static void __uart_start(struct tty_struct *tty)
        struct uart_state *state = tty->driver_data;
        struct uart_port *port = state->uart_port;
 
+       if (port->ops->wake_peer)
+               port->ops->wake_peer(port);
+
        if (!uart_tx_stopped(port))
                port->ops->start_tx(port);
 }
index 33834aa09ed43be03b0dd0ad418febf0ca25432d..5cf6802b02ae72987146734461f43570a355bf46 100644 (file)
@@ -199,6 +199,18 @@ config USB_F_HID
 config USB_F_PRINTER
        tristate
 
+config USB_F_MTP
+       tristate
+
+config USB_F_PTP
+        tristate
+
+config USB_F_AUDIO_SRC
+       tristate
+
+config USB_F_ACC
+       tristate
+
 choice
        tristate "USB Gadget Drivers"
        default USB_ETH
@@ -371,6 +383,44 @@ config USB_CONFIGFS_F_FS
          implemented in kernel space (for instance Ethernet, serial or
          mass storage) and other are implemented in user space.
 
+config USB_CONFIGFS_F_MTP
+       boolean "MTP gadget"
+       depends on USB_CONFIGFS
+       select USB_F_MTP
+       help
+         USB gadget MTP support
+
+config USB_CONFIGFS_F_PTP
+       boolean "PTP gadget"
+       depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP
+       select USB_F_PTP
+       help
+         USB gadget PTP support
+
+config USB_CONFIGFS_F_ACC
+       boolean "Accessory gadget"
+       depends on USB_CONFIGFS
+       select USB_F_ACC
+       help
+         USB gadget Accessory support
+
+config USB_CONFIGFS_F_AUDIO_SRC
+       boolean "Audio Source gadget"
+       depends on USB_CONFIGFS && USB_CONFIGFS_F_ACC
+       depends on SND
+       select SND_PCM
+       select USB_F_AUDIO_SRC
+       help
+         USB gadget Audio Source support
+
+config USB_CONFIGFS_UEVENT
+       boolean "Uevent notification of Gadget state"
+       depends on USB_CONFIGFS
+       help
+         Enable uevent notifications to userspace when the gadget
+         state changes. The gadget can be in any of the following
+         three states: "CONNECTED/DISCONNECTED/CONFIGURED"
+
 config USB_CONFIGFS_F_UAC1
        bool "Audio Class 1.0"
        depends on USB_CONFIGFS
index 8b14c2a13ac51abc9d3f433a0261c599ec8cdaf6..ffa2a682c04b09b82bd10b334ba8ff6a1503b958 100644 (file)
@@ -1870,6 +1870,12 @@ void composite_disconnect(struct usb_gadget *gadget)
        struct usb_composite_dev        *cdev = get_gadget_data(gadget);
        unsigned long                   flags;
 
+       if (cdev == NULL) {
+               WARN(1, "%s: Calling disconnect on a Gadget that is not connected\n",
+                    __func__);
+               return;
+       }
+
        /* REVISIT:  should we have config and device level
         * disconnect callbacks?
         */
index 163d305e1200bb07a73357d7b5824d97b362b924..54849fe9cb01ed8c6b64ebfc3c3e08edd243af89 100644 (file)
@@ -9,6 +9,31 @@
 #include "u_f.h"
 #include "u_os_desc.h"
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+#include <linux/platform_device.h>
+#include <linux/kdev_t.h>
+#include <linux/usb/ch9.h>
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
+                               const struct usb_ctrlrequest *ctrl);
+void acc_disconnect(void);
+#endif
+static struct class *android_class;
+static struct device *android_device;
+static int index;
+
+struct device *create_function_device(char *name)
+{
+       if (android_device && !IS_ERR(android_device))
+               return device_create(android_class, android_device,
+                       MKDEV(0, index++), NULL, name);
+       else
+               return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(create_function_device);
+#endif
+
 int check_user_usb_string(const char *name,
                struct usb_gadget_strings *stringtab_dev)
 {
@@ -62,6 +87,12 @@ struct gadget_info {
        bool use_os_desc;
        char b_vendor_code;
        char qw_sign[OS_STRING_QW_SIGN_LEN];
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+       bool connected;
+       bool sw_connected;
+       struct work_struct work;
+       struct device *dev;
+#endif
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -266,7 +297,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
 
        mutex_lock(&gi->lock);
 
-       if (!strlen(name)) {
+       if (!strlen(name) || strcmp(name, "none") == 0) {
                ret = unregister_gadget(gi);
                if (ret)
                        goto err;
@@ -1373,6 +1404,60 @@ err_comp_cleanup:
        return ret;
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static void android_work(struct work_struct *data)
+{
+       struct gadget_info *gi = container_of(data, struct gadget_info, work);
+       struct usb_composite_dev *cdev = &gi->cdev;
+       char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+       char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+       char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+       /* 0-connected 1-configured 2-disconnected */
+       bool status[3] = { false, false, false };
+       unsigned long flags;
+       bool uevent_sent = false;
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (cdev->config)
+               status[1] = true;
+
+       if (gi->connected != gi->sw_connected) {
+               if (gi->connected)
+                       status[0] = true;
+               else
+                       status[2] = true;
+               gi->sw_connected = gi->connected;
+       }
+       spin_unlock_irqrestore(&cdev->lock, flags);
+
+       if (status[0]) {
+               kobject_uevent_env(&android_device->kobj,
+                                       KOBJ_CHANGE, connected);
+               pr_info("%s: sent uevent %s\n", __func__, connected[0]);
+               uevent_sent = true;
+       }
+
+       if (status[1]) {
+               kobject_uevent_env(&android_device->kobj,
+                                       KOBJ_CHANGE, configured);
+               pr_info("%s: sent uevent %s\n", __func__, configured[0]);
+               uevent_sent = true;
+       }
+
+       if (status[2]) {
+               kobject_uevent_env(&android_device->kobj,
+                                       KOBJ_CHANGE, disconnected);
+               pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
+               uevent_sent = true;
+       }
+
+       if (!uevent_sent) {
+               pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+                       gi->connected, gi->sw_connected, cdev->config);
+       }
+}
+#endif
+
 static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
        struct usb_composite_dev        *cdev;
@@ -1392,14 +1477,79 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
        set_gadget_data(gadget, NULL);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static int android_setup(struct usb_gadget *gadget,
+                       const struct usb_ctrlrequest *c)
+{
+       struct usb_composite_dev *cdev = get_gadget_data(gadget);
+       unsigned long flags;
+       struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+       int value = -EOPNOTSUPP;
+       struct usb_function_instance *fi;
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (!gi->connected) {
+               gi->connected = 1;
+               schedule_work(&gi->work);
+       }
+       spin_unlock_irqrestore(&cdev->lock, flags);
+       list_for_each_entry(fi, &gi->available_func, cfs_list) {
+               if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) {
+                       value = fi->f->setup(fi->f, c);
+                       if (value >= 0)
+                               break;
+               }
+       }
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+       if (value < 0)
+               value = acc_ctrlrequest(cdev, c);
+#endif
+
+       if (value < 0)
+               value = composite_setup(gadget, c);
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+                                               cdev->config) {
+               schedule_work(&gi->work);
+       }
+       spin_unlock_irqrestore(&cdev->lock, flags);
+
+       return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev        *cdev = get_gadget_data(gadget);
+       struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+
+       /* Accessory HID support can be active while the accessory
+        * function is not actually enabled, so we need to inform it
+        * when we are disconnected.
+        */
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+       acc_disconnect();
+#endif
+       gi->connected = 0;
+       schedule_work(&gi->work);
+       composite_disconnect(gadget);
+}
+#endif
+
 static const struct usb_gadget_driver configfs_driver_template = {
        .bind           = configfs_composite_bind,
        .unbind         = configfs_composite_unbind,
-
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+       .setup          = android_setup,
+       .reset          = android_disconnect,
+       .disconnect     = android_disconnect,
+#else
        .setup          = composite_setup,
        .reset          = composite_disconnect,
        .disconnect     = composite_disconnect,
-
+#endif
        .suspend        = composite_suspend,
        .resume         = composite_resume,
 
@@ -1410,6 +1560,89 @@ static const struct usb_gadget_driver configfs_driver_template = {
        },
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct gadget_info *dev = dev_get_drvdata(pdev);
+       struct usb_composite_dev *cdev;
+       char *state = "DISCONNECTED";
+       unsigned long flags;
+
+       if (!dev)
+               goto out;
+
+       cdev = &dev->cdev;
+
+       if (!cdev)
+               goto out;
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (cdev->config)
+               state = "CONFIGURED";
+       else if (dev->connected)
+               state = "CONNECTED";
+       spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+       return sprintf(buf, "%s\n", state);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+       &dev_attr_state,
+       NULL
+};
+
+static int android_device_create(struct gadget_info *gi)
+{
+       struct device_attribute **attrs;
+       struct device_attribute *attr;
+
+       INIT_WORK(&gi->work, android_work);
+       android_device = device_create(android_class, NULL,
+                               MKDEV(0, 0), NULL, "android0");
+       if (IS_ERR(android_device))
+               return PTR_ERR(android_device);
+
+       dev_set_drvdata(android_device, gi);
+
+       attrs = android_usb_attributes;
+       while ((attr = *attrs++)) {
+               int err;
+
+               err = device_create_file(android_device, attr);
+               if (err) {
+                       device_destroy(android_device->class,
+                                      android_device->devt);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+static void android_device_destroy(void)
+{
+       struct device_attribute **attrs;
+       struct device_attribute *attr;
+
+       attrs = android_usb_attributes;
+       while ((attr = *attrs++))
+               device_remove_file(android_device, attr);
+       device_destroy(android_device->class, android_device->devt);
+}
+#else
+static inline int android_device_create(struct gadget_info *gi)
+{
+       return 0;
+}
+
+static inline void android_device_destroy(void)
+{
+}
+#endif
+
 static struct config_group *gadgets_make(
                struct config_group *group,
                const char *name)
@@ -1419,7 +1652,6 @@ static struct config_group *gadgets_make(
        gi = kzalloc(sizeof(*gi), GFP_KERNEL);
        if (!gi)
                return ERR_PTR(-ENOMEM);
-
        gi->group.default_groups = gi->default_groups;
        gi->group.default_groups[0] = &gi->functions_group;
        gi->group.default_groups[1] = &gi->configs_group;
@@ -1458,9 +1690,13 @@ static struct config_group *gadgets_make(
        if (!gi->composite.gadget_driver.function)
                goto err;
 
+       if (android_device_create(gi) < 0)
+               goto err;
+
        config_group_init_type_name(&gi->group, name,
                                &gadget_root_type);
        return &gi->group;
+
 err:
        kfree(gi);
        return ERR_PTR(-ENOMEM);
@@ -1469,6 +1705,7 @@ err:
 static void gadgets_drop(struct config_group *group, struct config_item *item)
 {
        config_item_put(item);
+       android_device_destroy();
 }
 
 static struct configfs_group_operations gadgets_ops = {
@@ -1495,7 +1732,9 @@ void unregister_gadget_item(struct config_item *item)
 {
        struct gadget_info *gi = to_gadget_info(item);
 
+       mutex_lock(&gi->lock);
        unregister_gadget(gi);
+       mutex_unlock(&gi->lock);
 }
 EXPORT_SYMBOL_GPL(unregister_gadget_item);
 
@@ -1506,6 +1745,13 @@ static int __init gadget_cfs_init(void)
        config_group_init(&gadget_subsys.su_group);
 
        ret = configfs_register_subsystem(&gadget_subsys);
+
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+       android_class = class_create(THIS_MODULE, "android_usb");
+       if (IS_ERR(android_class))
+               return PTR_ERR(android_class);
+#endif
+
        return ret;
 }
 module_init(gadget_cfs_init);
@@ -1513,5 +1759,10 @@ module_init(gadget_cfs_init);
 static void __exit gadget_cfs_exit(void)
 {
        configfs_unregister_subsystem(&gadget_subsys);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+       if (!IS_ERR(android_class))
+               class_destroy(android_class);
+#endif
+
 }
 module_exit(gadget_cfs_exit);
index bd7def576955d825bf1651508fd715c964bd33c2..1cd544beef634576d382773e0a5ee91bf00a5087 100644 (file)
@@ -44,3 +44,11 @@ usb_f_hid-y                  := f_hid.o
 obj-$(CONFIG_USB_F_HID)                += usb_f_hid.o
 usb_f_printer-y                        := f_printer.o
 obj-$(CONFIG_USB_F_PRINTER)    += usb_f_printer.o
+usb_f_mtp-y                     := f_mtp.o
+obj-$(CONFIG_USB_F_MTP)         += usb_f_mtp.o
+usb_f_ptp-y                     := f_ptp.o
+obj-$(CONFIG_USB_F_PTP)         += usb_f_ptp.o
+usb_f_audio_source-y            := f_audio_source.o
+obj-$(CONFIG_USB_F_AUDIO_SRC)   += usb_f_audio_source.o
+usb_f_accessory-y               := f_accessory.o
+obj-$(CONFIG_USB_F_ACC)         += usb_f_accessory.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
new file mode 100644 (file)
index 0000000..9d3ec0e
--- /dev/null
@@ -0,0 +1,1328 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#define MAX_INST_NAME_LEN        40
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+       struct list_head        list;
+       struct hid_device *hid;
+       struct acc_dev *dev;
+       /* accessory defined ID */
+       int id;
+       /* HID report descriptor */
+       u8 *report_desc;
+       /* length of HID report descriptor */
+       int report_desc_len;
+       /* number of bytes of report_desc we have received so far */
+       int report_desc_offset;
+};
+
+struct acc_dev {
+       struct usb_function function;
+       struct usb_composite_dev *cdev;
+       spinlock_t lock;
+
+       struct usb_ep *ep_in;
+       struct usb_ep *ep_out;
+
+       /* set to 1 when we connect */
+       int online:1;
+       /* Set to 1 when we disconnect.
+        * Not cleared until our file is closed.
+        */
+       int disconnected:1;
+
+       /* strings sent by the host */
+       char manufacturer[ACC_STRING_SIZE];
+       char model[ACC_STRING_SIZE];
+       char description[ACC_STRING_SIZE];
+       char version[ACC_STRING_SIZE];
+       char uri[ACC_STRING_SIZE];
+       char serial[ACC_STRING_SIZE];
+
+       /* for acc_complete_set_string */
+       int string_index;
+
+       /* set to 1 if we have a pending start request */
+       int start_requested;
+
+       int audio_mode;
+
+       /* synchronize access to our device file */
+       atomic_t open_excl;
+
+       struct list_head tx_idle;
+
+       wait_queue_head_t read_wq;
+       wait_queue_head_t write_wq;
+       struct usb_request *rx_req[RX_REQ_MAX];
+       int rx_done;
+
+       /* delayed work for handling ACCESSORY_START */
+       struct delayed_work start_work;
+
+       /* worker for registering and unregistering hid devices */
+       struct work_struct hid_work;
+
+       /* list of active HID devices */
+       struct list_head        hid_list;
+
+       /* list of new HID devices to register */
+       struct list_head        new_hid_list;
+
+       /* list of dead HID devices to unregister */
+       struct list_head        dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 2,
+       .bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+       .bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+       .bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+       (struct usb_descriptor_header *) &acc_interface_desc,
+       (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+       (struct usb_descriptor_header *) &acc_interface_desc,
+       (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+       (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+       NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+       [INTERFACE_STRING_INDEX].s      = "Android Accessory Interface",
+       {  },   /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+       .language               = 0x0409,       /* en-US */
+       .strings                = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+       &acc_string_table,
+       NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+struct acc_instance {
+       struct usb_function_instance func_inst;
+       const char *name;
+};
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+       return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+       if (!req)
+               return NULL;
+
+       /* now allocate buffers for the requests */
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+
+       return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+               struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_add_tail(&req->list, head);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (list_empty(head)) {
+               req = 0;
+       } else {
+               req = list_first_entry(head, struct usb_request, list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+       dev->online = 0;
+       dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev *dev = _acc_dev;
+
+       if (req->status == -ESHUTDOWN) {
+               pr_debug("acc_complete_in set disconnected");
+               acc_set_disconnected(dev);
+       }
+
+       req_put(dev, &dev->tx_idle, req);
+
+       wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev *dev = _acc_dev;
+
+       dev->rx_done = 1;
+       if (req->status == -ESHUTDOWN) {
+               pr_debug("acc_complete_out set disconnected");
+               acc_set_disconnected(dev);
+       }
+
+       wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev  *dev = ep->driver_data;
+       char *string_dest = NULL;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_set_string, err %d\n", req->status);
+               return;
+       }
+
+       switch (dev->string_index) {
+       case ACCESSORY_STRING_MANUFACTURER:
+               string_dest = dev->manufacturer;
+               break;
+       case ACCESSORY_STRING_MODEL:
+               string_dest = dev->model;
+               break;
+       case ACCESSORY_STRING_DESCRIPTION:
+               string_dest = dev->description;
+               break;
+       case ACCESSORY_STRING_VERSION:
+               string_dest = dev->version;
+               break;
+       case ACCESSORY_STRING_URI:
+               string_dest = dev->uri;
+               break;
+       case ACCESSORY_STRING_SERIAL:
+               string_dest = dev->serial;
+               break;
+       }
+       if (string_dest) {
+               unsigned long flags;
+
+               if (length >= ACC_STRING_SIZE)
+                       length = ACC_STRING_SIZE - 1;
+
+               spin_lock_irqsave(&dev->lock, flags);
+               memcpy(string_dest, req->buf, length);
+               /* ensure zero termination */
+               string_dest[length] = 0;
+               spin_unlock_irqrestore(&dev->lock, flags);
+       } else {
+               pr_err("unknown accessory string index %d\n",
+                       dev->string_index);
+       }
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       struct acc_dev *dev = hid->dev;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_set_hid_report_desc, err %d\n",
+                       req->status);
+               return;
+       }
+
+       memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+       hid->report_desc_offset += length;
+       if (hid->report_desc_offset == hid->report_desc_len) {
+               /* After we have received the entire report descriptor
+                * we schedule work to initialize the HID device
+                */
+               schedule_work(&dev->hid_work);
+       }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+               return;
+       }
+
+       hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+       struct acc_hid_dev *hdev = hid->driver_data;
+
+       hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+       return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+       __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+       return 0;
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+       .parse = acc_hid_parse,
+       .start = acc_hid_start,
+       .stop = acc_hid_stop,
+       .open = acc_hid_open,
+       .close = acc_hid_close,
+       .raw_request = acc_hid_raw_request,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+               int id, int desc_len)
+{
+       struct acc_hid_dev *hdev;
+
+       hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+       if (!hdev)
+               return NULL;
+       hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+       if (!hdev->report_desc) {
+               kfree(hdev);
+               return NULL;
+       }
+       hdev->dev = dev;
+       hdev->id = id;
+       hdev->report_desc_len = desc_len;
+
+       return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+       struct acc_hid_dev *hid;
+
+       list_for_each_entry(hid, list, list) {
+               if (hid->id == id)
+                       return hid;
+       }
+       return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       /* report descriptor length must be > 0 */
+       if (desc_length <= 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       /* replace HID if one already exists with this ID */
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (hid)
+               list_move(&hid->list, &dev->dead_hid_list);
+
+       hid = acc_hid_new(dev, id, desc_length);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -ENOMEM;
+       }
+
+       list_add(&hid->list, &dev->new_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* schedule work to register the HID device */
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -EINVAL;
+       }
+
+       list_move(&hid->list, &dev->dead_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+                               struct usb_endpoint_descriptor *in_desc,
+                               struct usb_endpoint_descriptor *out_desc)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       struct usb_ep *ep;
+       int i;
+
+       DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+       ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_in = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_out = ep;
+
+       /* now allocate requests for our endpoints */
+       for (i = 0; i < TX_REQ_MAX; i++) {
+               req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = acc_complete_in;
+               req_put(dev, &dev->tx_idle, req);
+       }
+       for (i = 0; i < RX_REQ_MAX; i++) {
+               req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = acc_complete_out;
+               dev->rx_req[i] = req;
+       }
+
+       return 0;
+
+fail:
+       pr_err("acc_bind() could not allocate requests\n");
+       while ((req = req_get(dev, &dev->tx_idle)))
+               acc_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               acc_request_free(dev->rx_req[i], dev->ep_out);
+       return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct acc_dev *dev = fp->private_data;
+       struct usb_request *req;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret = 0;
+
+       pr_debug("acc_read(%zu)\n", count);
+
+       if (dev->disconnected) {
+               pr_debug("acc_read disconnected");
+               return -ENODEV;
+       }
+
+       if (count > BULK_BUFFER_SIZE)
+               count = BULK_BUFFER_SIZE;
+
+       /* we will block until we're online */
+       pr_debug("acc_read: waiting for online\n");
+       ret = wait_event_interruptible(dev->read_wq, dev->online);
+       if (ret < 0) {
+               r = ret;
+               goto done;
+       }
+
+       if (dev->rx_done) {
+               // last req cancelled. try to get it.
+               req = dev->rx_req[0];
+               goto copy_data;
+       }
+
+requeue_req:
+       /* queue a request */
+       req = dev->rx_req[0];
+       req->length = count;
+       dev->rx_done = 0;
+       ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+       if (ret < 0) {
+               r = -EIO;
+               goto done;
+       } else {
+               pr_debug("rx %p queue\n", req);
+       }
+
+       /* wait for a request to complete */
+       ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+       if (ret < 0) {
+               r = ret;
+               ret = usb_ep_dequeue(dev->ep_out, req);
+               if (ret != 0) {
+                       // Cancel failed; data may already have been received.
+                       // It will be retrieved in the next read.
+                       pr_debug("acc_read: cancelling failed %d", ret);
+               }
+               goto done;
+       }
+
+copy_data:
+       dev->rx_done = 0;
+       if (dev->online) {
+               /* If we got a 0-len packet, throw it back and try again. */
+               if (req->actual == 0)
+                       goto requeue_req;
+
+               pr_debug("rx %p %u\n", req, req->actual);
+               xfer = (req->actual < count) ? req->actual : count;
+               r = xfer;
+               if (copy_to_user(buf, req->buf, xfer))
+                       r = -EFAULT;
+       } else
+               r = -EIO;
+
+done:
+       pr_debug("acc_read returning %zd\n", r);
+       return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct acc_dev *dev = fp->private_data;
+       struct usb_request *req = 0;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret;
+
+       pr_debug("acc_write(%zu)\n", count);
+
+       if (!dev->online || dev->disconnected) {
+               pr_debug("acc_write disconnected or not online");
+               return -ENODEV;
+       }
+
+       while (count > 0) {
+               if (!dev->online) {
+                       pr_debug("acc_write: not online\n");
+                       r = -EIO;
+                       break;
+               }
+
+               /* get an idle tx request to use */
+               req = 0;
+               ret = wait_event_interruptible(dev->write_wq,
+                       ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > BULK_BUFFER_SIZE) {
+                       xfer = BULK_BUFFER_SIZE;
+                       /* No ZLP; there will be more TX requests, so not yet. */
+                       req->zero = 0;
+               } else {
+                       xfer = count;
+                       /* If the data length is a multiple of the
+                        * maxpacket size then send a zero length packet (ZLP).
+                        */
+                       req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+               }
+               if (copy_from_user(req->buf, buf, xfer)) {
+                       r = -EFAULT;
+                       break;
+               }
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       pr_debug("acc_write: xfer error %d\n", ret);
+                       r = -EIO;
+                       break;
+               }
+
+               buf += xfer;
+               count -= xfer;
+
+               /* zero this so we don't try to free it on error exit */
+               req = 0;
+       }
+
+       if (req)
+               req_put(dev, &dev->tx_idle, req);
+
+       pr_debug("acc_write returning %zd\n", r);
+       return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+       struct acc_dev *dev = fp->private_data;
+       char *src = NULL;
+       int ret;
+
+       switch (code) {
+       case ACCESSORY_GET_STRING_MANUFACTURER:
+               src = dev->manufacturer;
+               break;
+       case ACCESSORY_GET_STRING_MODEL:
+               src = dev->model;
+               break;
+       case ACCESSORY_GET_STRING_DESCRIPTION:
+               src = dev->description;
+               break;
+       case ACCESSORY_GET_STRING_VERSION:
+               src = dev->version;
+               break;
+       case ACCESSORY_GET_STRING_URI:
+               src = dev->uri;
+               break;
+       case ACCESSORY_GET_STRING_SERIAL:
+               src = dev->serial;
+               break;
+       case ACCESSORY_IS_START_REQUESTED:
+               return dev->start_requested;
+       case ACCESSORY_GET_AUDIO_MODE:
+               return dev->audio_mode;
+       }
+       if (!src)
+               return -EINVAL;
+
+       ret = strlen(src) + 1;
+       if (copy_to_user((void __user *)value, src, ret))
+               ret = -EFAULT;
+       return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "acc_open\n");
+       if (atomic_xchg(&_acc_dev->open_excl, 1))
+               return -EBUSY;
+
+       _acc_dev->disconnected = 0;
+       fp->private_data = _acc_dev;
+       return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "acc_release\n");
+
+       WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+       _acc_dev->disconnected = 0;
+       return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+       .owner = THIS_MODULE,
+       .read = acc_read,
+       .write = acc_write,
+       .unlocked_ioctl = acc_ioctl,
+       .open = acc_open,
+       .release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       int ret;
+
+       ret = hid_parse(hdev);
+       if (ret)
+               return ret;
+       return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "usb_accessory",
+       .fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+       { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+       { }
+};
+
+static struct hid_driver acc_hid_driver = {
+       .name = "USB accessory",
+       .id_table = acc_hid_table,
+       .probe = acc_hid_probe,
+};
+
+int acc_ctrlrequest(struct usb_composite_dev *cdev,
+                               const struct usb_ctrlrequest *ctrl)
+{
+       struct acc_dev  *dev = _acc_dev;
+       int     value = -EOPNOTSUPP;
+       struct acc_hid_dev *hid;
+       int offset;
+       u8 b_requestType = ctrl->bRequestType;
+       u8 b_request = ctrl->bRequest;
+       u16     w_index = le16_to_cpu(ctrl->wIndex);
+       u16     w_value = le16_to_cpu(ctrl->wValue);
+       u16     w_length = le16_to_cpu(ctrl->wLength);
+       unsigned long flags;
+
+/*
+       printk(KERN_INFO "acc_ctrlrequest "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       b_requestType, b_request,
+                       w_value, w_index, w_length);
+*/
+
+       if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+               if (b_request == ACCESSORY_START) {
+                       dev->start_requested = 1;
+                       schedule_delayed_work(
+                               &dev->start_work, msecs_to_jiffies(10));
+                       value = 0;
+               } else if (b_request == ACCESSORY_SEND_STRING) {
+                       dev->string_index = w_index;
+                       cdev->gadget->ep0->driver_data = dev;
+                       cdev->req->complete = acc_complete_set_string;
+                       value = w_length;
+               } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+                               w_index == 0 && w_length == 0) {
+                       dev->audio_mode = w_value;
+                       value = 0;
+               } else if (b_request == ACCESSORY_REGISTER_HID) {
+                       value = acc_register_hid(dev, w_value, w_index);
+               } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+                       value = acc_unregister_hid(dev, w_value);
+               } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->new_hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       offset = w_index;
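+                       /* descriptor fragments must arrive in order and fit
+                        * within the announced report descriptor length
+                        */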
+                       if (offset != hid->report_desc_offset
+                               || offset + w_length > hid->report_desc_len) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_set_hid_report_desc;
+                       value = w_length;
+               } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_send_hid_event;
+                       value = w_length;
+               }
+       } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+               if (b_request == ACCESSORY_GET_PROTOCOL) {
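+                       /* the host queries which accessory protocol version we support */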
+                       *((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+                       value = sizeof(u16);
+
+                       /* clear any string left over from a previous session */
+                       memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+                       memset(dev->model, 0, sizeof(dev->model));
+                       memset(dev->description, 0, sizeof(dev->description));
+                       memset(dev->version, 0, sizeof(dev->version));
+                       memset(dev->uri, 0, sizeof(dev->uri));
+                       memset(dev->serial, 0, sizeof(dev->serial));
+                       dev->start_requested = 0;
+                       dev->audio_mode = 0;
+               }
+       }
+
+       if (value >= 0) {
+               cdev->req->zero = 0;
+               cdev->req->length = value;
+               value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+               if (value < 0)
+                       ERROR(cdev, "%s setup response queue error\n",
+                               __func__);
+       }
+
+err:
+       if (value == -EOPNOTSUPP)
+               VDBG(cdev,
+                       "unknown class-specific control req "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+       return value;
+}
+EXPORT_SYMBOL_GPL(acc_ctrlrequest);
+
+static int
+__acc_function_bind(struct usb_configuration *c,
+                       struct usb_function *f, bool configfs)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct acc_dev  *dev = func_to_dev(f);
+       int                     id;
+       int                     ret;
+
+       DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+       if (configfs) {
+               if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+                       ret = usb_string_id(c->cdev);
+                       if (ret < 0)
+                               return ret;
+                       acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+                       acc_interface_desc.iInterface = ret;
+               }
+               dev->cdev = c->cdev;
+       }
+       ret = hid_register_driver(&acc_hid_driver);
+       if (ret)
+               return ret;
+
+       dev->start_requested = 0;
+
+       /* allocate interface ID(s) */
+       id = usb_interface_id(c, f);
+       if (id < 0)
+               return id;
+       acc_interface_desc.bInterfaceNumber = id;
+
+       /* allocate endpoints */
+       ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+                       &acc_fullspeed_out_desc);
+       if (ret)
+               return ret;
+
+       /* support high speed hardware */
+       if (gadget_is_dualspeed(c->cdev->gadget)) {
+               acc_highspeed_in_desc.bEndpointAddress =
+                       acc_fullspeed_in_desc.bEndpointAddress;
+               acc_highspeed_out_desc.bEndpointAddress =
+                       acc_fullspeed_out_desc.bEndpointAddress;
+       }
+
+       DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+                       gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+                       f->name, dev->ep_in->name, dev->ep_out->name);
+       return 0;
+}
+
+static int
+acc_function_bind_configfs(struct usb_configuration *c,
+                       struct usb_function *f)
+{
+       return __acc_function_bind(c, f, true);
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+       struct acc_hid_dev *hid;
+       struct list_head *entry, *temp;
+       unsigned long flags;
+
+       /* do nothing if usb accessory device doesn't exist */
+       if (!dev)
+               return;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_for_each_safe(entry, temp, &dev->hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+       hid_unregister_driver(&acc_hid_driver);
+       kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_request *req;
+       int i;
+
+       while ((req = req_get(dev, &dev->tx_idle)))
+               acc_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               acc_request_free(dev->rx_req[i], dev->ep_out);
+
+       acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+       char *envp[2] = { "ACCESSORY=START", NULL };
+
+       kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+       struct hid_device *hid;
+       int ret;
+
+       hid = hid_allocate_device();
+       if (IS_ERR(hid))
+               return PTR_ERR(hid);
+
+       hid->ll_driver = &acc_hid_ll_driver;
+       hid->dev.parent = acc_device.this_device;
+
+       hid->bus = BUS_USB;
+       hid->vendor = HID_ANY_ID;
+       hid->product = HID_ANY_ID;
+       hid->driver_data = hdev;
+       ret = hid_add_device(hid);
+       if (ret) {
+               pr_err("can't add hid device: %d\n", ret);
+               hid_destroy_device(hid);
+               return ret;
+       }
+
+       hdev->hid = hid;
+       return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+       kfree(hid->report_desc);
+       kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+       struct acc_dev *dev = _acc_dev;
+       struct list_head        *entry, *temp;
+       struct acc_hid_dev *hid;
+       struct list_head        new_list, dead_list;
+       unsigned long flags;
+
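+       /*
+        * hid_add_device() and hid_destroy_device() may sleep, so collect the
+        * affected devices on local lists while holding the spinlock and
+        * register/destroy them only after dropping it.
+        */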
+       INIT_LIST_HEAD(&new_list);
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* copy hids that are ready for initialization to new_list */
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (hid->report_desc_offset == hid->report_desc_len)
+                       list_move(&hid->list, &new_list);
+       }
+
+       if (list_empty(&dev->dead_hid_list)) {
+               INIT_LIST_HEAD(&dead_list);
+       } else {
+               /* move all of dev->dead_hid_list to dead_list */
+               dead_list.prev = dev->dead_hid_list.prev;
+               dead_list.next = dev->dead_hid_list.next;
+               dead_list.next->prev = &dead_list;
+               dead_list.prev->next = &dead_list;
+               INIT_LIST_HEAD(&dev->dead_hid_list);
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* register new HID devices */
+       list_for_each_safe(entry, temp, &new_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (acc_hid_init(hid)) {
+                       pr_err("can't add HID device %p\n", hid);
+                       acc_hid_delete(hid);
+               } else {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       list_move(&hid->list, &dev->hid_list);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+               }
+       }
+
+       /* remove dead HID devices */
+       list_for_each_safe(entry, temp, &dead_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               if (hid->hid)
+                       hid_destroy_device(hid->hid);
+               acc_hid_delete(hid);
+       }
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+               unsigned intf, unsigned alt)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_out);
+       if (ret) {
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+
+       dev->online = 1;
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+       return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_composite_dev        *cdev = dev->cdev;
+
+       DBG(cdev, "acc_function_disable\n");
+       acc_set_disconnected(dev);
+       usb_ep_disable(dev->ep_in);
+       usb_ep_disable(dev->ep_out);
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+
+       VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_setup(void)
+{
+       struct acc_dev *dev;
+       int ret;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->lock);
+       init_waitqueue_head(&dev->read_wq);
+       init_waitqueue_head(&dev->write_wq);
+       atomic_set(&dev->open_excl, 0);
+       INIT_LIST_HEAD(&dev->tx_idle);
+       INIT_LIST_HEAD(&dev->hid_list);
+       INIT_LIST_HEAD(&dev->new_hid_list);
+       INIT_LIST_HEAD(&dev->dead_hid_list);
+       INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+       INIT_WORK(&dev->hid_work, acc_hid_work);
+
+       /* _acc_dev must be set before calling usb_gadget_register_driver */
+       _acc_dev = dev;
+
+       ret = misc_register(&acc_device);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       kfree(dev);
+       pr_err("USB accessory gadget driver failed to initialize\n");
+       return ret;
+}
+
+void acc_disconnect(void)
+{
+       /* unregister all HID devices if USB is disconnected */
+       kill_all_hid_devices(_acc_dev);
+}
+EXPORT_SYMBOL_GPL(acc_disconnect);
+
+static void acc_cleanup(void)
+{
+       misc_deregister(&acc_device);
+       kfree(_acc_dev);
+       _acc_dev = NULL;
+}
+
+static struct acc_instance *to_acc_instance(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct acc_instance,
+               func_inst.group);
+}
+
+static void acc_attr_release(struct config_item *item)
+{
+       struct acc_instance *fi_acc = to_acc_instance(item);
+
+       usb_put_function_instance(&fi_acc->func_inst);
+}
+
+static struct configfs_item_operations acc_item_ops = {
+       .release        = acc_attr_release,
+};
+
+static struct config_item_type acc_func_type = {
+       .ct_item_ops    = &acc_item_ops,
+       .ct_owner       = THIS_MODULE,
+};
+
+static struct acc_instance *to_fi_acc(struct usb_function_instance *fi)
+{
+       return container_of(fi, struct acc_instance, func_inst);
+}
+
+static int acc_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+       struct acc_instance *fi_acc;
+       char *ptr;
+       int name_len;
+
+       name_len = strlen(name) + 1;
+       if (name_len > MAX_INST_NAME_LEN)
+               return -ENAMETOOLONG;
+
+       ptr = kstrndup(name, name_len, GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       fi_acc = to_fi_acc(fi);
+       fi_acc->name = ptr;
+       return 0;
+}
+
+static void acc_free_inst(struct usb_function_instance *fi)
+{
+       struct acc_instance *fi_acc;
+
+       fi_acc = to_fi_acc(fi);
+       kfree(fi_acc->name);
+       acc_cleanup();
+}
+
+static struct usb_function_instance *acc_alloc_inst(void)
+{
+       struct acc_instance *fi_acc;
+       struct acc_dev *dev;
+       int err;
+
+       fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL);
+       if (!fi_acc)
+               return ERR_PTR(-ENOMEM);
+       fi_acc->func_inst.set_inst_name = acc_set_inst_name;
+       fi_acc->func_inst.free_func_inst = acc_free_inst;
+
+       err = acc_setup();
+       if (err) {
+               kfree(fi_acc);
+               pr_err("Error setting ACCESSORY\n");
+               return ERR_PTR(err);
+       }
+
+       config_group_init_type_name(&fi_acc->func_inst.group,
+                                       "", &acc_func_type);
+       dev = _acc_dev;
+       return  &fi_acc->func_inst;
+}
+
+static void acc_free(struct usb_function *f)
+{
+       /* no-op: no function-specific resources are allocated in acc_alloc() */
+}
+
+int acc_ctrlrequest_configfs(struct usb_function *f,
+                       const struct usb_ctrlrequest *ctrl)
+{
+       if (f->config != NULL && f->config->cdev != NULL)
+               return acc_ctrlrequest(f->config->cdev, ctrl);
+       else
+               return -1;
+}
+
+static struct usb_function *acc_alloc(struct usb_function_instance *fi)
+{
+       struct acc_dev *dev = _acc_dev;
+
+       pr_info("acc_alloc\n");
+
+       dev->function.name = "accessory";
+       dev->function.strings = acc_strings;
+       dev->function.fs_descriptors = fs_acc_descs;
+       dev->function.hs_descriptors = hs_acc_descs;
+       dev->function.bind = acc_function_bind_configfs;
+       dev->function.unbind = acc_function_unbind;
+       dev->function.set_alt = acc_function_set_alt;
+       dev->function.disable = acc_function_disable;
+       dev->function.free_func = acc_free;
+       dev->function.setup = acc_ctrlrequest_configfs;
+
+       return &dev->function;
+}
+DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
new file mode 100644 (file)
index 0000000..db7903d
--- /dev/null
@@ -0,0 +1,1060 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#define SAMPLE_RATE 44100
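+/* integer division: roughly 44 frames per millisecond at 44.1 kHz */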
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE     0
+#define AUDIO_AS_INTERFACE     1
+#define AUDIO_NUM_INTERFACES   2
+#define MAX_INST_NAME_LEN     40
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH        UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+       + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+       + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+       .bLength =              UAC_DT_AC_HEADER_LENGTH,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_HEADER,
+       .bcdADC =               __constant_cpu_to_le16(0x0100),
+       .wTotalLength =         __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+       .bInCollection =        AUDIO_NUM_INTERFACES,
+       .baInterfaceNr = {
+               [0] =           AUDIO_AC_INTERFACE,
+               [1] =           AUDIO_AS_INTERFACE,
+       }
+};
+
+#define INPUT_TERMINAL_ID      1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+       .bLength =              UAC_DT_INPUT_TERMINAL_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_INPUT_TERMINAL,
+       .bTerminalID =          INPUT_TERMINAL_ID,
+       .wTerminalType =        UAC_INPUT_TERMINAL_MICROPHONE,
+       .bAssocTerminal =       0,
+       .wChannelConfig =       0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID                2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+       .bLength                = UAC_DT_FEATURE_UNIT_SIZE(0),
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_FEATURE_UNIT,
+       .bUnitID                = FEATURE_UNIT_ID,
+       .bSourceID              = INPUT_TERMINAL_ID,
+       .bControlSize           = 2,
+};
+
+#define OUTPUT_TERMINAL_ID     3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+       .bLength                = UAC_DT_OUTPUT_TERMINAL_SIZE,
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_OUTPUT_TERMINAL,
+       .bTerminalID            = OUTPUT_TERMINAL_ID,
+       .wTerminalType          = UAC_TERMINAL_STREAMING,
+       .bAssocTerminal         = FEATURE_UNIT_ID,
+       .bSourceID              = FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    0,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    1,
+       .bNumEndpoints =        1,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+       .bLength =              UAC_DT_AS_HEADER_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_AS_GENERAL,
+       .bTerminalLink =        INPUT_TERMINAL_ID,
+       .bDelay =               1,
+       .wFormatTag =           UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+       .bLength =              UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_FORMAT_TYPE,
+       .bFormatType =          UAC_FORMAT_TYPE_I,
+       .bSubframeSize =        2,
+       .bBitResolution =       16,
+       .bSamFreqType =         1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+       .bLength =              UAC_ISO_ENDPOINT_DESC_SIZE,
+       .bDescriptorType =      USB_DT_CS_ENDPOINT,
+       .bDescriptorSubtype =   UAC_EP_GENERAL,
+       .bmAttributes =         1,
+       .bLockDelayUnits =      1,
+       .wLockDelay =           __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+       .info =                 SNDRV_PCM_INFO_MMAP |
+                               SNDRV_PCM_INFO_MMAP_VALID |
+                               SNDRV_PCM_INFO_BATCH |
+                               SNDRV_PCM_INFO_INTERLEAVED |
+                               SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+       .formats                = SNDRV_PCM_FMTBIT_S16_LE,
+       .channels_min           = 2,
+       .channels_max           = 2,
+       .rate_min               = SAMPLE_RATE,
+       .rate_max               = SAMPLE_RATE,
+
+       .buffer_bytes_max =     1024 * 1024,
+       .period_bytes_min =     64,
+       .period_bytes_max =     512 * 1024,
+       .periods_min =          2,
+       .periods_max =          1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+       int     card;
+       int     device;
+};
+
+struct audio_dev {
+       struct usb_function             func;
+       struct snd_card                 *card;
+       struct snd_pcm                  *pcm;
+       struct snd_pcm_substream *substream;
+
+       struct list_head                idle_reqs;
+       struct usb_ep                   *in_ep;
+
+       spinlock_t                      lock;
+
+       /* beginning, end and current position in our buffer */
+       void                            *buffer_start;
+       void                            *buffer_end;
+       void                            *buffer_pos;
+
+       /* byte size of a "period" */
+       unsigned int                    period;
+       /* bytes sent since last call to snd_pcm_period_elapsed */
+       unsigned int                    period_offset;
+       /* time we started playing */
+       ktime_t                         start_time;
+       /* number of frames sent since start_time */
+       s64                             frames_sent;
+       struct audio_source_config      *config;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+       return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_instance {
+       struct usb_function_instance func_inst;
+       const char *name;
+       struct audio_source_config *config;
+       struct device *audio_device;
+};
+
+static void audio_source_attr_release(struct config_item *item);
+
+static struct configfs_item_operations audio_source_item_ops = {
+       .release        = audio_source_attr_release,
+};
+
+static struct config_item_type audio_source_func_type = {
+       .ct_item_ops    = &audio_source_item_ops,
+       .ct_owner       = THIS_MODULE,
+};
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+               struct device_attribute *attr, char *buf);
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+       &dev_attr_pcm,
+       NULL
+};
+
+/*--------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+       if (!req)
+               return NULL;
+
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+       req->length = buffer_size;
+       return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       list_add_tail(&req->list, &audio->idle_reqs);
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       if (list_empty(&audio->idle_reqs)) {
+               req = NULL;
+       } else {
+               req = list_first_entry(&audio->idle_reqs, struct usb_request,
+                               list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&audio->lock, flags);
+       return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+       struct snd_pcm_runtime *runtime;
+       struct usb_request *req;
+       int length, length1, length2, ret;
+       s64 msecs;
+       s64 frames;
+       ktime_t now;
+
+       /* audio->substream will be null if we have been closed */
+       if (!audio->substream)
+               return;
+       /* audio->buffer_pos will be null if we have been stopped */
+       if (!audio->buffer_pos)
+               return;
+
+       runtime = audio->substream->runtime;
+
+       /* compute number of frames to send */
+       now = ktime_get();
+       msecs = div_s64((ktime_to_ns(now) - ktime_to_ns(audio->start_time)),
+                       1000000);
+       frames = div_s64((msecs * SAMPLE_RATE), 1000);
+
+       /* Readjust our frames_sent if we fall too far behind.
+        * If we get too far behind it is better to drop some frames than
+        * to keep sending data too fast in an attempt to catch up.
+        */
+       if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+               audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+       frames -= audio->frames_sent;
+
+       /* We need to send something to keep the pipeline going */
+       if (frames <= 0)
+               frames = FRAMES_PER_MSEC;
+
+       while (frames > 0) {
+               req = audio_req_get(audio);
+               if (!req)
+                       break;
+
+               length = frames_to_bytes(runtime, frames);
+               if (length > IN_EP_MAX_PACKET_SIZE)
+                       length = IN_EP_MAX_PACKET_SIZE;
+
+               if (audio->buffer_pos + length > audio->buffer_end)
+                       length1 = audio->buffer_end - audio->buffer_pos;
+               else
+                       length1 = length;
+               memcpy(req->buf, audio->buffer_pos, length1);
+               if (length1 < length) {
+                       /* Wrap around and copy remaining length
+                        * at beginning of buffer.
+                        */
+                       length2 = length - length1;
+                       memcpy(req->buf + length1, audio->buffer_start,
+                                       length2);
+                       audio->buffer_pos = audio->buffer_start + length2;
+               } else {
+                       audio->buffer_pos += length1;
+                       if (audio->buffer_pos >= audio->buffer_end)
+                               audio->buffer_pos = audio->buffer_start;
+               }
+
+               req->length = length;
+               ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+               if (ret < 0) {
+                       pr_err("usb_ep_queue failed ret: %d\n", ret);
+                       audio_req_put(audio, req);
+                       break;
+               }
+
+               frames -= bytes_to_frames(runtime, length);
+               audio->frames_sent += bytes_to_frames(runtime, length);
+       }
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       struct audio_dev *audio = req->context;
+
+       pr_debug("audio_data_complete req->status %d req->actual %d\n",
+               req->status, req->actual);
+
+       audio_req_put(audio, req);
+
+       if (!audio->buffer_start || req->status)
+               return;
+
+       audio->period_offset += req->actual;
+       if (audio->period_offset >= audio->period) {
+               snd_pcm_period_elapsed(audio->substream);
+               audio->period_offset = 0;
+       }
+       audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       int value = -EOPNOTSUPP;
+       u16 ep = le16_to_cpu(ctrl->wIndex);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       switch (ctrl->bRequest) {
+       case UAC_SET_CUR:
+       case UAC_SET_MIN:
+       case UAC_SET_MAX:
+       case UAC_SET_RES:
+               value = len;
+               break;
+       default:
+               break;
+       }
+
+       return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int value = -EOPNOTSUPP;
+       u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u8 *buf = cdev->req->buf;
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
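+       /* the UAC control selector is carried in the high byte of wValue */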
+       if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+               switch (ctrl->bRequest) {
+               case UAC_GET_CUR:
+               case UAC_GET_MIN:
+               case UAC_GET_MAX:
+               case UAC_GET_RES:
+                       /* return our sample rate as a 3-byte little-endian value */
+                       buf[0] = (u8)SAMPLE_RATE;
+                       buf[1] = (u8)(SAMPLE_RATE >> 8);
+                       buf[2] = (u8)(SAMPLE_RATE >> 16);
+                       value = 3;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request *req = cdev->req;
+       int value = -EOPNOTSUPP;
+       u16 w_index = le16_to_cpu(ctrl->wIndex);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u16 w_length = le16_to_cpu(ctrl->wLength);
+
+       /* composite driver infrastructure handles everything; interface
+        * activation uses set_alt().
+        */
+       switch (ctrl->bRequestType) {
+       case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_set_endpoint_req(f, ctrl);
+               break;
+
+       case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_get_endpoint_req(f, ctrl);
+               break;
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = 0;
+               req->length = value;
+               req->complete = audio_control_complete;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       pr_err("audio response queue error %d\n", value);
+       }
+
+       /* device either stalls (value < 0) or reports success */
+       return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+       struct audio_dev *audio = func_to_audio(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+       if (ret)
+               return ret;
+
+       usb_ep_enable(audio->in_ep);
+       return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+       struct audio_dev        *audio = func_to_audio(f);
+
+       pr_debug("audio_disable\n");
+       usb_ep_disable(audio->in_ep);
+}
+
+static void audio_free_func(struct usb_function *f)
+{
+       /* no-op */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+       u8 *sam_freq;
+       int rate;
+
+       /* Set channel numbers */
+       input_terminal_desc.bNrChannels = 2;
+       as_type_i_desc.bNrChannels = 2;
+
+       /* Set sample rates */
+       rate = SAMPLE_RATE;
+       sam_freq = as_type_i_desc.tSamFreq[0];
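+       /* tSamFreq is a 3-byte value; copying the low bytes of rate assumes a
+        * little-endian CPU
+        */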
+       memcpy(sam_freq, &rate, 3);
+}
+
+
+static int snd_card_setup(struct usb_configuration *c,
+       struct audio_source_config *config);
+static struct audio_source_instance *to_fi_audio_source(
+       const struct usb_function_instance *fi);
+
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct audio_dev *audio = func_to_audio(f);
+       int status;
+       struct usb_ep *ep;
+       struct usb_request *req;
+       int i;
+       int err;
+
+       if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+               struct audio_source_instance *fi_audio =
+                               to_fi_audio_source(f->fi);
+               struct audio_source_config *config =
+                               fi_audio->config;
+
+               err = snd_card_setup(c, config);
+               if (err)
+                       return err;
+       }
+
+       audio_build_desc(audio);
+
+       /* allocate instance-specific interface IDs, and patch descriptors */
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       ac_interface_desc.bInterfaceNumber = status;
+
+       /* AUDIO_AC_INTERFACE */
+       ac_header_desc.baInterfaceNr[0] = status;
+
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       as_interface_alt_0_desc.bInterfaceNumber = status;
+       as_interface_alt_1_desc.bInterfaceNumber = status;
+
+       /* AUDIO_AS_INTERFACE */
+       ac_header_desc.baInterfaceNr[1] = status;
+
+       status = -ENODEV;
+
+       /* allocate our endpoint */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+       if (!ep)
+               goto fail;
+       audio->in_ep = ep;
+       ep->driver_data = audio; /* claim */
+
+       if (gadget_is_dualspeed(c->cdev->gadget))
+               hs_as_in_ep_desc.bEndpointAddress =
+                       fs_as_in_ep_desc.bEndpointAddress;
+
+       f->fs_descriptors = fs_audio_desc;
+       f->hs_descriptors = hs_audio_desc;
+
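+       /* pre-allocate the ISO IN requests and park them on the idle list */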
+       for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+               req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+               if (req) {
+                       req->context = audio;
+                       req->complete = audio_data_complete;
+                       audio_req_put(audio, req);
+               } else
+                       status = -ENOMEM;
+       }
+
+fail:
+       return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct audio_dev *audio = func_to_audio(f);
+       struct usb_request *req;
+
+       while ((req = audio_req_get(audio)))
+               audio_request_free(req, audio->in_ep);
+
+       snd_card_free_when_closed(audio->card);
+       audio->card = NULL;
+       audio->pcm = NULL;
+       audio->substream = NULL;
+       audio->in_ep = NULL;
+
+       if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+               struct audio_source_instance *fi_audio =
+                               to_fi_audio_source(f->fi);
+               struct audio_source_config *config =
+                               fi_audio->config;
+
+               config->card = -1;
+               config->device = -1;
+       }
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+       audio->start_time = ktime_get();
+       audio->frames_sent = 0;
+       audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->buffer_start = NULL;
+       audio->buffer_end = NULL;
+       audio->buffer_pos = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = substream->private_data;
+
+       runtime->private_data = audio;
+       runtime->hw = audio_hw_info;
+       snd_pcm_limit_hw_rates(runtime);
+       runtime->hw.channels_max = 2;
+
+       audio->substream = substream;
+       return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+       struct audio_dev *audio = substream->private_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->substream = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+
+       return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+                               struct snd_pcm_hw_params *params)
+{
+       unsigned int channels = params_channels(params);
+       unsigned int rate = params_rate(params);
+
+       if (rate != SAMPLE_RATE)
+               return -EINVAL;
+       if (channels != 2)
+               return -EINVAL;
+
+       return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+               params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+
+       audio->period = snd_pcm_lib_period_bytes(substream);
+       audio->period_offset = 0;
+       audio->buffer_start = runtime->dma_area;
+       audio->buffer_end = audio->buffer_start
+               + snd_pcm_lib_buffer_bytes(substream);
+       audio->buffer_pos = audio->buffer_start;
+
+       return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+       ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+       /* return offset of next frame to fill in our buffer */
+       return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+                                       int cmd)
+{
+       struct audio_dev *audio = substream->runtime->private_data;
+       int ret = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+               audio_pcm_playback_start(audio);
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               audio_pcm_playback_stop(audio);
+               break;
+
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static struct audio_dev _audio_dev = {
+       .func = {
+               .name = "audio_source",
+               .bind = audio_bind,
+               .unbind = audio_unbind,
+               .set_alt = audio_set_alt,
+               .setup = audio_setup,
+               .disable = audio_disable,
+               .free_func = audio_free_func,
+       },
+       .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+       .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+       .open           = audio_pcm_open,
+       .close          = audio_pcm_close,
+       .ioctl          = snd_pcm_lib_ioctl,
+       .hw_params      = audio_pcm_hw_params,
+       .hw_free        = audio_pcm_hw_free,
+       .prepare        = audio_pcm_prepare,
+       .trigger        = audio_pcm_playback_trigger,
+       .pointer        = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+               struct audio_source_config *config)
+{
+       struct audio_dev *audio;
+       int err;
+
+       config->card = -1;
+       config->device = -1;
+
+       audio = &_audio_dev;
+
+       err = snd_card_setup(c, config);
+       if (err)
+               return err;
+
+       err = usb_add_function(c, &audio->func);
+       if (err)
+               goto add_fail;
+
+       return 0;
+
+add_fail:
+       snd_card_free(audio->card);
+       return err;
+}
+
+static int snd_card_setup(struct usb_configuration *c,
+               struct audio_source_config *config)
+{
+       struct audio_dev *audio;
+       struct snd_card *card;
+       struct snd_pcm *pcm;
+       int err;
+
+       audio = &_audio_dev;
+
+       err = snd_card_new(&c->cdev->gadget->dev,
+                       SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+                       THIS_MODULE, 0, &card);
+       if (err)
+               return err;
+
+       err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+       if (err)
+               goto pcm_fail;
+
+       pcm->private_data = audio;
+       pcm->info_flags = 0;
+       audio->pcm = pcm;
+
+       strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+       snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+                               NULL, 0, 64 * 1024);
+
+       strlcpy(card->driver, "audio_source", sizeof(card->driver));
+       strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+       strlcpy(card->longname, "USB accessory audio source",
+               sizeof(card->longname));
+
+       err = snd_card_register(card);
+       if (err)
+               goto register_fail;
+
+       config->card = pcm->card->number;
+       config->device = pcm->device;
+       audio->card = card;
+       return 0;
+
+register_fail:
+pcm_fail:
+       /* audio->card is not set yet on these error paths; free the local card */
+       snd_card_free(card);
+       return err;
+}
+
+static struct audio_source_instance *to_audio_source_instance(
+                                       struct config_item *item)
+{
+       return container_of(to_config_group(item), struct audio_source_instance,
+               func_inst.group);
+}
+
+static struct audio_source_instance *to_fi_audio_source(
+                                       const struct usb_function_instance *fi)
+{
+       return container_of(fi, struct audio_source_instance, func_inst);
+}
+
+static void audio_source_attr_release(struct config_item *item)
+{
+       struct audio_source_instance *fi_audio = to_audio_source_instance(item);
+
+       usb_put_function_instance(&fi_audio->func_inst);
+}
+
+static int audio_source_set_inst_name(struct usb_function_instance *fi,
+                                       const char *name)
+{
+       struct audio_source_instance *fi_audio;
+       char *ptr;
+       int name_len;
+
+       name_len = strlen(name) + 1;
+       if (name_len > MAX_INST_NAME_LEN)
+               return -ENAMETOOLONG;
+
+       ptr = kstrndup(name, name_len, GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       fi_audio = to_fi_audio_source(fi);
+       fi_audio->name = ptr;
+
+       return 0;
+}
+
+static void audio_source_free_inst(struct usb_function_instance *fi)
+{
+       struct audio_source_instance *fi_audio;
+
+       fi_audio = to_fi_audio_source(fi);
+       device_destroy(fi_audio->audio_device->class,
+                       fi_audio->audio_device->devt);
+       kfree(fi_audio->name);
+       kfree(fi_audio->config);
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct audio_source_instance *fi_audio = dev_get_drvdata(dev);
+       struct audio_source_config *config = fi_audio->config;
+
+       /* print PCM card and device numbers */
+       return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+struct device *create_function_device(char *name);
+
+static struct usb_function_instance *audio_source_alloc_inst(void)
+{
+       struct audio_source_instance *fi_audio;
+       struct device_attribute **attrs;
+       struct device_attribute *attr;
+       struct device *dev;
+       void *err_ptr;
+       int err = 0;
+
+       fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
+       if (!fi_audio)
+               return ERR_PTR(-ENOMEM);
+
+       fi_audio->func_inst.set_inst_name = audio_source_set_inst_name;
+       fi_audio->func_inst.free_func_inst = audio_source_free_inst;
+
+       fi_audio->config = kzalloc(sizeof(struct audio_source_config),
+                                                       GFP_KERNEL);
+       if (!fi_audio->config) {
+               err_ptr = ERR_PTR(-ENOMEM);
+               goto fail_audio;
+       }
+
+       config_group_init_type_name(&fi_audio->func_inst.group, "",
+                                               &audio_source_func_type);
+       dev = create_function_device("f_audio_source");
+
+       if (IS_ERR(dev)) {
+               err_ptr = dev;
+               goto fail_audio_config;
+       }
+
+       fi_audio->config->card = -1;
+       fi_audio->config->device = -1;
+       fi_audio->audio_device = dev;
+
+       attrs = audio_source_function_attributes;
+       if (attrs) {
+               while ((attr = *attrs++) && !err)
+                       err = device_create_file(dev, attr);
+               if (err) {
+                       err_ptr = ERR_PTR(-EINVAL);
+                       goto fail_device;
+               }
+       }
+
+       dev_set_drvdata(dev, fi_audio);
+       _audio_dev.config = fi_audio->config;
+
+       return  &fi_audio->func_inst;
+
+fail_device:
+       device_destroy(dev->class, dev->devt);
+fail_audio_config:
+       kfree(fi_audio->config);
+fail_audio:
+       kfree(fi_audio);
+       return err_ptr;
+
+}
+
+static struct usb_function *audio_source_alloc(struct usb_function_instance *fi)
+{
+       return &_audio_dev.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst,
+                       audio_source_alloc);
+MODULE_LICENSE("GPL");
index 898a570319f17c3128b7a44878ebc09ea7242961..847f70363477c3ecbc640584b18e4ea59037a4fb 100644 (file)
@@ -1040,6 +1040,65 @@ static void f_midi_free_inst(struct usb_function_instance *f)
        kfree(opts);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+extern struct device *create_function_device(char *name);
+static ssize_t alsa_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct usb_function_instance *fi_midi = dev_get_drvdata(dev);
+       struct f_midi *midi;
+
+       if (fi_midi && !fi_midi->f)
+               dev_warn(dev, "f_midi: function not set\n");
+
+       if (fi_midi && fi_midi->f) {
+               midi = func_to_midi(fi_midi->f);
+               if (midi->rmidi && midi->rmidi->card)
+                       return sprintf(buf, "%d %d\n",
+                       midi->rmidi->card->number, midi->rmidi->device);
+       }
+
+       /* print PCM card and device numbers */
+       return sprintf(buf, "%d %d\n", -1, -1);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL);
+
+static struct device_attribute *alsa_function_attributes[] = {
+       &dev_attr_alsa,
+       NULL
+};
+
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+       struct device *dev;
+       struct device_attribute **attrs;
+       struct device_attribute *attr;
+       int err = 0;
+
+       dev = create_function_device("f_midi");
+       if (IS_ERR(dev))
+               return PTR_ERR(dev);
+
+       attrs = alsa_function_attributes;
+       if (attrs) {
+               while ((attr = *attrs++) && !err)
+                       err = device_create_file(dev, attr);
+               if (err) {
+                       device_destroy(dev->class, dev->devt);
+                       return -EINVAL;
+               }
+       }
+       dev_set_drvdata(dev, fi);
+       return 0;
+}
+#else
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+       return 0;
+}
+#endif
+
 static struct usb_function_instance *f_midi_alloc_inst(void)
 {
        struct f_midi_opts *opts;
@@ -1057,6 +1116,11 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
        opts->in_ports = 1;
        opts->out_ports = 1;
 
+       if (create_alsa_device(&opts->func_inst)) {
+               kfree(opts);
+               return ERR_PTR(-ENODEV);
+       }
+
        config_group_init_type_name(&opts->func_inst.group, "",
                                    &midi_func_type);
 
@@ -1076,6 +1140,7 @@ static void f_midi_free(struct usb_function *f)
        for (i = opts->in_ports - 1; i >= 0; --i)
                kfree(midi->in_port[i]);
        kfree(midi);
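+       /* drop the cached function pointer used by the alsa attribute */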
+       opts->func_inst.f = NULL;
        --opts->refcnt;
        mutex_unlock(&opts->lock);
 }
@@ -1158,6 +1223,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
        midi->func.disable      = f_midi_disable;
        midi->func.free_func    = f_midi_free;
 
+       fi->f = &midi->func;
        return &midi->func;
 
 setup_fail:
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
new file mode 100644 (file)
index 0000000..d8b69af
--- /dev/null
@@ -0,0 +1,1537 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "configfs.h"
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+#define MAX_INST_NAME_LEN          40
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+#define DRIVER_NAME "mtp"
+
+static const char mtp_shortname[] = DRIVER_NAME "_usb";
+
+struct mtp_dev {
+       struct usb_function function;
+       struct usb_composite_dev *cdev;
+       spinlock_t lock;
+
+       struct usb_ep *ep_in;
+       struct usb_ep *ep_out;
+       struct usb_ep *ep_intr;
+
+       int state;
+
+       /* synchronize access to our device file */
+       atomic_t open_excl;
+       /* to enforce only one ioctl at a time */
+       atomic_t ioctl_excl;
+
+       struct list_head tx_idle;
+       struct list_head intr_idle;
+
+       wait_queue_head_t read_wq;
+       wait_queue_head_t write_wq;
+       wait_queue_head_t intr_wq;
+       struct usb_request *rx_req[RX_REQ_MAX];
+       int rx_done;
+
+       /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+        * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+        */
+       struct workqueue_struct *wq;
+       struct work_struct send_file_work;
+       struct work_struct receive_file_work;
+       struct file *xfer_file;
+       loff_t xfer_file_offset;
+       int64_t xfer_file_length;
+       unsigned xfer_send_header;
+       uint16_t xfer_command;
+       uint32_t xfer_transaction_id;
+       int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 3,
+       .bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+       .bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+       .bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 3,
+       .bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+       .bInterfaceSubClass     = 1,
+       .bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_ss_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
+       .bLength                = sizeof(mtp_ss_in_comp_desc),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       /* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_ss_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
+       .bLength                = sizeof(mtp_ss_out_comp_desc),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       /* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_INT,
+       .wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+       .bInterval              = 6,
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
+       .bLength                = sizeof(mtp_intr_ss_comp_desc),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       .wBytesPerInterval      = cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+       (struct usb_descriptor_header *) &mtp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+       (struct usb_descriptor_header *) &mtp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+       (struct usb_descriptor_header *) &mtp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_ss_in_desc,
+       (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+       (struct usb_descriptor_header *) &mtp_ss_out_desc,
+       (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+       (struct usb_descriptor_header *) &ptp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+       (struct usb_descriptor_header *) &ptp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+       (struct usb_descriptor_header *) &ptp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_ss_in_desc,
+       (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+       (struct usb_descriptor_header *) &mtp_ss_out_desc,
+       (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+       NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+       /* Naming interface "MTP" so libmtp will recognize us */
+       [INTERFACE_STRING_INDEX].s      = "MTP",
+       {  },   /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+       .language               = 0x0409,       /* en-US */
+       .strings                = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+       &mtp_string_table,
+       NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+       18, /* sizeof(mtp_os_string) */
+       USB_DT_STRING,
+       /* Signature field: "MSFT100" */
+       'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+       /* vendor code */
+       1,
+       /* padding */
+       0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+       __le32  dwLength;
+       __u16   bcdVersion;
+       __le16  wIndex;
+       __u8    bCount;
+       __u8    reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+       __u8    bFirstInterfaceNumber;
+       __u8    bInterfaceCount;
+       __u8    compatibleID[8];
+       __u8    subCompatibleID[8];
+       __u8    reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+       struct mtp_ext_config_desc_header       header;
+       struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+       .header = {
+               .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+               .bcdVersion = __constant_cpu_to_le16(0x0100),
+               .wIndex = __constant_cpu_to_le16(4),
+               .bCount = 1,
+       },
+       .function = {
+               .bFirstInterfaceNumber = 0,
+               .bInterfaceCount = 1,
+               .compatibleID = { 'M', 'T', 'P' },
+       },
+};
+
+struct mtp_device_status {
+       __le16  wLength;
+       __le16  wCode;
+};
+
+struct mtp_data_header {
+       /* length of packet, including this header */
+       __le32  length;
+       /* container type (2 for data packet) */
+       __le16  type;
+       /* MTP command code */
+       __le16  command;
+       /* MTP transaction ID */
+       __le32  transaction_id;
+};
+
+struct mtp_instance {
+       struct usb_function_instance func_inst;
+       const char *name;
+       struct mtp_dev *dev;
+       char mtp_ext_compat_id[16];
+       struct usb_os_desc mtp_os_desc;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+       return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+       if (!req)
+               return NULL;
+
+       /* now allocate buffers for the requests */
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+
+       return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
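+/* Exclusive-access helpers: mtp_lock() succeeds only for the first caller
+ * (the atomic counter moves 0 -> 1); concurrent callers get -1 and must
+ * back off.  mtp_unlock() drops the count again.
+ */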
+static inline int mtp_lock(atomic_t *excl)
+{
+       if (atomic_inc_return(excl) == 1) {
+               return 0;
+       } else {
+               atomic_dec(excl);
+               return -1;
+       }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+       atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+               struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_add_tail(&req->list, head);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (list_empty(head)) {
+               req = 0;
+       } else {
+               req = list_first_entry(head, struct usb_request, list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       mtp_req_put(dev, &dev->tx_idle, req);
+
+       wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       dev->rx_done = 1;
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       mtp_req_put(dev, &dev->intr_idle, req);
+
+       wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+                               struct usb_endpoint_descriptor *in_desc,
+                               struct usb_endpoint_descriptor *out_desc,
+                               struct usb_endpoint_descriptor *intr_desc)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       struct usb_ep *ep;
+       int i;
+
+       DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+       ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_in = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_out = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_intr = ep;
+
+       /* now allocate requests for our endpoints */
+       for (i = 0; i < TX_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_in;
+               mtp_req_put(dev, &dev->tx_idle, req);
+       }
+       for (i = 0; i < RX_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_out;
+               dev->rx_req[i] = req;
+       }
+       for (i = 0; i < INTR_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_intr;
+               mtp_req_put(dev, &dev->intr_idle, req);
+       }
+
+       return 0;
+
+fail:
+       pr_err("mtp_bind() could not allocate requests\n");
+       return -1;
+}
+
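+/* read() on the misc device: block until the function is online, queue a
+ * single bulk OUT request (rx_req[0]) and copy the completed data to
+ * userspace.  At most MTP_BULK_BUFFER_SIZE bytes per call.
+ */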
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret = 0;
+
+       DBG(cdev, "mtp_read(%zu)\n", count);
+
+       if (count > MTP_BULK_BUFFER_SIZE)
+               return -EINVAL;
+
+       /* we will block until we're online */
+       DBG(cdev, "mtp_read: waiting for online state\n");
+       ret = wait_event_interruptible(dev->read_wq,
+               dev->state != STATE_OFFLINE);
+       if (ret < 0) {
+               r = ret;
+               goto done;
+       }
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED) {
+               /* report cancelation to userspace */
+               dev->state = STATE_READY;
+               spin_unlock_irq(&dev->lock);
+               return -ECANCELED;
+       }
+       dev->state = STATE_BUSY;
+       spin_unlock_irq(&dev->lock);
+
+requeue_req:
+       /* queue a request */
+       req = dev->rx_req[0];
+       req->length = count;
+       dev->rx_done = 0;
+       ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+       if (ret < 0) {
+               r = -EIO;
+               goto done;
+       } else {
+               DBG(cdev, "rx %p queue\n", req);
+       }
+
+       /* wait for a request to complete */
+       ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+       if (ret < 0) {
+               r = ret;
+               usb_ep_dequeue(dev->ep_out, req);
+               goto done;
+       }
+       if (dev->state == STATE_BUSY) {
+               /* If we got a 0-len packet, throw it back and try again. */
+               if (req->actual == 0)
+                       goto requeue_req;
+
+               DBG(cdev, "rx %p %d\n", req, req->actual);
+               xfer = (req->actual < count) ? req->actual : count;
+               r = xfer;
+               if (copy_to_user(buf, req->buf, xfer))
+                       r = -EFAULT;
+       } else
+               r = -EIO;
+
+done:
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               r = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+
+       DBG(cdev, "mtp_read returning %zd\n", r);
+       return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req = 0;
+       ssize_t r = count;
+       unsigned xfer;
+       int sendZLP = 0;
+       int ret;
+
+       DBG(cdev, "mtp_write(%zu)\n", count);
+
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED) {
+               /* report cancelation to userspace */
+               dev->state = STATE_READY;
+               spin_unlock_irq(&dev->lock);
+               return -ECANCELED;
+       }
+       if (dev->state == STATE_OFFLINE) {
+               spin_unlock_irq(&dev->lock);
+               return -ENODEV;
+       }
+       dev->state = STATE_BUSY;
+       spin_unlock_irq(&dev->lock);
+
+       /* we need to send a zero length packet to signal the end of transfer
+        * if the transfer size is aligned to a packet boundary.
+        */
+       if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+               sendZLP = 1;
+
+       while (count > 0 || sendZLP) {
+               /* so we exit after sending ZLP */
+               if (count == 0)
+                       sendZLP = 0;
+
+               if (dev->state != STATE_BUSY) {
+                       DBG(cdev, "mtp_write dev->error\n");
+                       r = -EIO;
+                       break;
+               }
+
+               /* get an idle tx request to use */
+               req = 0;
+               ret = wait_event_interruptible(dev->write_wq,
+                       ((req = mtp_req_get(dev, &dev->tx_idle))
+                               || dev->state != STATE_BUSY));
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > MTP_BULK_BUFFER_SIZE)
+                       xfer = MTP_BULK_BUFFER_SIZE;
+               else
+                       xfer = count;
+               if (xfer && copy_from_user(req->buf, buf, xfer)) {
+                       r = -EFAULT;
+                       break;
+               }
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       DBG(cdev, "mtp_write: xfer error %d\n", ret);
+                       r = -EIO;
+                       break;
+               }
+
+               buf += xfer;
+               count -= xfer;
+
+               /* zero this so we don't try to free it on error exit */
+               req = 0;
+       }
+
+       if (req)
+               mtp_req_put(dev, &dev->tx_idle, req);
+
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               r = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+
+       DBG(cdev, "mtp_write returning %zd\n", r);
+       return r;
+}
+
+/* read from a local file and write to USB */
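+/* The transfer parameters (file, offset, length, header fields) are staged
+ * in dev->xfer_* by mtp_ioctl() before this work is queued; the smp_rmb()
+ * below pairs with the smp_wmb() in mtp_ioctl() so the worker sees them.
+ */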
+static void send_file_work(struct work_struct *data)
+{
+       struct mtp_dev *dev = container_of(data, struct mtp_dev,
+                                               send_file_work);
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req = 0;
+       struct mtp_data_header *header;
+       struct file *filp;
+       loff_t offset;
+       int64_t count;
+       int xfer, ret, hdr_size;
+       int r = 0;
+       int sendZLP = 0;
+
+       /* read our parameters */
+       smp_rmb();
+       filp = dev->xfer_file;
+       offset = dev->xfer_file_offset;
+       count = dev->xfer_file_length;
+
+       DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+       if (dev->xfer_send_header) {
+               hdr_size = sizeof(struct mtp_data_header);
+               count += hdr_size;
+       } else {
+               hdr_size = 0;
+       }
+
+       /* we need to send a zero length packet to signal the end of transfer
+        * if the transfer size is aligned to a packet boundary.
+        */
+       if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+               sendZLP = 1;
+
+       while (count > 0 || sendZLP) {
+               /* so we exit after sending ZLP */
+               if (count == 0)
+                       sendZLP = 0;
+
+               /* get an idle tx request to use */
+               req = 0;
+               ret = wait_event_interruptible(dev->write_wq,
+                       (req = mtp_req_get(dev, &dev->tx_idle))
+                       || dev->state != STATE_BUSY);
+               if (dev->state == STATE_CANCELED) {
+                       r = -ECANCELED;
+                       break;
+               }
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > MTP_BULK_BUFFER_SIZE)
+                       xfer = MTP_BULK_BUFFER_SIZE;
+               else
+                       xfer = count;
+
+               if (hdr_size) {
+                       /* prepend MTP data header */
+                       header = (struct mtp_data_header *)req->buf;
+                       header->length = __cpu_to_le32(count);
+                       header->type = __cpu_to_le16(2); /* data packet */
+                       header->command = __cpu_to_le16(dev->xfer_command);
+                       header->transaction_id =
+                                       __cpu_to_le32(dev->xfer_transaction_id);
+               }
+
+               ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+                                                               &offset);
+               if (ret < 0) {
+                       r = ret;
+                       break;
+               }
+               xfer = ret + hdr_size;
+               hdr_size = 0;
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       DBG(cdev, "send_file_work: xfer error %d\n", ret);
+                       dev->state = STATE_ERROR;
+                       r = -EIO;
+                       break;
+               }
+
+               count -= xfer;
+
+               /* zero this so we don't try to free it on error exit */
+               req = 0;
+       }
+
+       if (req)
+               mtp_req_put(dev, &dev->tx_idle, req);
+
+       DBG(cdev, "send_file_work returning %d\n", r);
+       /* write the result */
+       dev->xfer_result = r;
+       smp_wmb();
+}
+
+/* read from USB and write to a local file */
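+/* The rx requests are used as a ping-pong pair: while one completed buffer
+ * is written out with vfs_write(), the next USB read is already queued on
+ * the other, overlapping USB and filesystem I/O.
+ */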
+static void receive_file_work(struct work_struct *data)
+{
+       struct mtp_dev *dev = container_of(data, struct mtp_dev,
+                                               receive_file_work);
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *read_req = NULL, *write_req = NULL;
+       struct file *filp;
+       loff_t offset;
+       int64_t count;
+       int ret, cur_buf = 0;
+       int r = 0;
+
+       /* read our parameters */
+       smp_rmb();
+       filp = dev->xfer_file;
+       offset = dev->xfer_file_offset;
+       count = dev->xfer_file_length;
+
+       DBG(cdev, "receive_file_work(%lld)\n", count);
+
+       while (count > 0 || write_req) {
+               if (count > 0) {
+                       /* queue a request */
+                       read_req = dev->rx_req[cur_buf];
+                       cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+                       read_req->length = (count > MTP_BULK_BUFFER_SIZE
+                                       ? MTP_BULK_BUFFER_SIZE : count);
+                       dev->rx_done = 0;
+                       ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+                       if (ret < 0) {
+                               r = -EIO;
+                               dev->state = STATE_ERROR;
+                               break;
+                       }
+               }
+
+               if (write_req) {
+                       DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+                       ret = vfs_write(filp, write_req->buf, write_req->actual,
+                               &offset);
+                       DBG(cdev, "vfs_write %d\n", ret);
+                       if (ret != write_req->actual) {
+                               r = -EIO;
+                               dev->state = STATE_ERROR;
+                               break;
+                       }
+                       write_req = NULL;
+               }
+
+               if (read_req) {
+                       /* wait for our last read to complete */
+                       ret = wait_event_interruptible(dev->read_wq,
+                               dev->rx_done || dev->state != STATE_BUSY);
+                       if (dev->state == STATE_CANCELED) {
+                               r = -ECANCELED;
+                               if (!dev->rx_done)
+                                       usb_ep_dequeue(dev->ep_out, read_req);
+                               break;
+                       }
+                       /* if xfer_file_length is 0xFFFFFFFF, then we read until
+                        * we get a zero length packet
+                        */
+                       if (count != 0xFFFFFFFF)
+                               count -= read_req->actual;
+                       if (read_req->actual < read_req->length) {
+                               /*
+                                * short packet is used to signal EOF for
+                                * sizes > 4 gig
+                                */
+                               DBG(cdev, "got short packet\n");
+                               count = 0;
+                       }
+
+                       write_req = read_req;
+                       read_req = NULL;
+               }
+       }
+
+       DBG(cdev, "receive_file_work returning %d\n", r);
+       /* write the result */
+       dev->xfer_result = r;
+       smp_wmb();
+}
+
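+/* Queue an MTP event on the interrupt endpoint; waits up to one second for
+ * a free interrupt request and fails with -ETIME if none becomes available.
+ */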
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+       struct usb_request *req = NULL;
+       int ret;
+       int length = event->length;
+
+       DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+       if (length < 0 || length > INTR_BUFFER_SIZE)
+               return -EINVAL;
+       if (dev->state == STATE_OFFLINE)
+               return -ENODEV;
+
+       ret = wait_event_interruptible_timeout(dev->intr_wq,
+                       (req = mtp_req_get(dev, &dev->intr_idle)),
+                       msecs_to_jiffies(1000));
+       if (!req)
+               return -ETIME;
+
+       if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+               mtp_req_put(dev, &dev->intr_idle, req);
+               return -EFAULT;
+       }
+       req->length = length;
+       ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+       if (ret)
+               mtp_req_put(dev, &dev->intr_idle, req);
+
+       return ret;
+}
+
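+/* ioctl() interface used by the MTP userspace stack: MTP_SEND_FILE,
+ * MTP_RECEIVE_FILE and MTP_SEND_FILE_WITH_HEADER pass a file descriptor
+ * plus offset/length, and the transfer then runs on the work queue;
+ * MTP_SEND_EVENT pushes an event out on the interrupt endpoint.
+ */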
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct file *filp = NULL;
+       int ret = -EINVAL;
+
+       if (mtp_lock(&dev->ioctl_excl))
+               return -EBUSY;
+
+       switch (code) {
+       case MTP_SEND_FILE:
+       case MTP_RECEIVE_FILE:
+       case MTP_SEND_FILE_WITH_HEADER:
+       {
+               struct mtp_file_range   mfr;
+               struct work_struct *work;
+
+               spin_lock_irq(&dev->lock);
+               if (dev->state == STATE_CANCELED) {
+                       /* report cancelation to userspace */
+                       dev->state = STATE_READY;
+                       spin_unlock_irq(&dev->lock);
+                       ret = -ECANCELED;
+                       goto out;
+               }
+               if (dev->state == STATE_OFFLINE) {
+                       spin_unlock_irq(&dev->lock);
+                       ret = -ENODEV;
+                       goto out;
+               }
+               dev->state = STATE_BUSY;
+               spin_unlock_irq(&dev->lock);
+
+               if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+                       ret = -EFAULT;
+                       goto fail;
+               }
+               /* hold a reference to the file while we are working with it */
+               filp = fget(mfr.fd);
+               if (!filp) {
+                       ret = -EBADF;
+                       goto fail;
+               }
+
+               /* write the parameters */
+               dev->xfer_file = filp;
+               dev->xfer_file_offset = mfr.offset;
+               dev->xfer_file_length = mfr.length;
+               smp_wmb();
+
+               if (code == MTP_SEND_FILE_WITH_HEADER) {
+                       work = &dev->send_file_work;
+                       dev->xfer_send_header = 1;
+                       dev->xfer_command = mfr.command;
+                       dev->xfer_transaction_id = mfr.transaction_id;
+               } else if (code == MTP_SEND_FILE) {
+                       work = &dev->send_file_work;
+                       dev->xfer_send_header = 0;
+               } else {
+                       work = &dev->receive_file_work;
+               }
+
+               /* We do the file transfer on a work queue so it will run
+                * in kernel context, which is necessary for vfs_read and
+                * vfs_write to use our buffers in the kernel address space.
+                */
+               queue_work(dev->wq, work);
+               /* wait for operation to complete */
+               flush_workqueue(dev->wq);
+               fput(filp);
+
+               /* read the result */
+               smp_rmb();
+               ret = dev->xfer_result;
+               break;
+       }
+       case MTP_SEND_EVENT:
+       {
+               struct mtp_event        event;
+               /* return here so we don't change dev->state below,
+                * which would interfere with bulk transfer state.
+                */
+               if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+                       ret = -EFAULT;
+               else
+                       ret = mtp_send_event(dev, &event);
+               goto out;
+       }
+       }
+
+fail:
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               ret = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+out:
+       mtp_unlock(&dev->ioctl_excl);
+       DBG(dev->cdev, "ioctl returning %d\n", ret);
+       return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "mtp_open\n");
+       if (mtp_lock(&_mtp_dev->open_excl))
+               return -EBUSY;
+
+       /* clear any error condition */
+       if (_mtp_dev->state != STATE_OFFLINE)
+               _mtp_dev->state = STATE_READY;
+
+       fp->private_data = _mtp_dev;
+       return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "mtp_release\n");
+
+       mtp_unlock(&_mtp_dev->open_excl);
+       return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+       .owner = THIS_MODULE,
+       .read = mtp_read,
+       .write = mtp_write,
+       .unlocked_ioctl = mtp_ioctl,
+       .open = mtp_open,
+       .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = mtp_shortname,
+       .fops = &mtp_fops,
+};
+
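+/* Handle device-level control requests on ep0: the Microsoft OS string
+ * descriptor (string index 0xEE), the vendor request for the extended
+ * configuration descriptor, and the MTP class requests
+ * (MTP_REQ_CANCEL / MTP_REQ_GET_DEVICE_STATUS).
+ */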
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+                               const struct usb_ctrlrequest *ctrl)
+{
+       struct mtp_dev *dev = _mtp_dev;
+       int     value = -EOPNOTSUPP;
+       u16     w_index = le16_to_cpu(ctrl->wIndex);
+       u16     w_value = le16_to_cpu(ctrl->wValue);
+       u16     w_length = le16_to_cpu(ctrl->wLength);
+       unsigned long   flags;
+
+       VDBG(cdev, "mtp_ctrlrequest "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+
+       /* Handle MTP OS string */
+       if (ctrl->bRequestType ==
+                       (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+                       && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+                       && (w_value >> 8) == USB_DT_STRING
+                       && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+               value = (w_length < sizeof(mtp_os_string)
+                               ? w_length : sizeof(mtp_os_string));
+               memcpy(cdev->req->buf, mtp_os_string, value);
+       } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+               /* Handle MTP OS descriptor */
+               DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+                       ctrl->bRequest, w_index, w_value, w_length);
+
+               if (ctrl->bRequest == 1
+                               && (ctrl->bRequestType & USB_DIR_IN)
+                               && (w_index == 4 || w_index == 5)) {
+                       value = (w_length < sizeof(mtp_ext_config_desc) ?
+                                       w_length : sizeof(mtp_ext_config_desc));
+                       memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+               }
+       } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+               DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+                       ctrl->bRequest, w_index, w_value, w_length);
+
+               if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+                               && w_value == 0) {
+                       DBG(cdev, "MTP_REQ_CANCEL\n");
+
+                       spin_lock_irqsave(&dev->lock, flags);
+                       if (dev->state == STATE_BUSY) {
+                               dev->state = STATE_CANCELED;
+                               wake_up(&dev->read_wq);
+                               wake_up(&dev->write_wq);
+                       }
+                       spin_unlock_irqrestore(&dev->lock, flags);
+
+                       /* We need to queue a request to read the remaining
+                        * bytes, but we don't actually need to look at
+                        * the contents.
+                        */
+                       value = w_length;
+               } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+                               && w_index == 0 && w_value == 0) {
+                       struct mtp_device_status *status = cdev->req->buf;
+
+                       status->wLength =
+                               __constant_cpu_to_le16(sizeof(*status));
+
+                       DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+                       spin_lock_irqsave(&dev->lock, flags);
+                       /* device status is "busy" until we report
+                        * the cancelation to userspace
+                        */
+                       if (dev->state == STATE_CANCELED)
+                               status->wCode =
+                                       __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+                       else
+                               status->wCode =
+                                       __cpu_to_le16(MTP_RESPONSE_OK);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       value = sizeof(*status);
+               }
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               int rc;
+
+               cdev->req->zero = value < w_length;
+               cdev->req->length = value;
+               rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+               if (rc < 0)
+                       ERROR(cdev, "%s: response queue error\n", __func__);
+       }
+       return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct mtp_dev  *dev = func_to_mtp(f);
+       int                     id;
+       int                     ret;
+       struct mtp_instance *fi_mtp;
+
+       dev->cdev = cdev;
+       DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+       /* allocate interface ID(s) */
+       id = usb_interface_id(c, f);
+       if (id < 0)
+               return id;
+       mtp_interface_desc.bInterfaceNumber = id;
+
+       if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+               ret = usb_string_id(c->cdev);
+               if (ret < 0)
+                       return ret;
+               mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+               mtp_interface_desc.iInterface = ret;
+       }
+
+       fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+
+       if (cdev->use_os_string) {
+               f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+                                       GFP_KERNEL);
+               if (!f->os_desc_table)
+                       return -ENOMEM;
+               f->os_desc_n = 1;
+               f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
+       }
+
+       /* allocate endpoints */
+       ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+                       &mtp_fullspeed_out_desc, &mtp_intr_desc);
+       if (ret)
+               return ret;
+
+       /* support high speed hardware */
+       if (gadget_is_dualspeed(c->cdev->gadget)) {
+               mtp_highspeed_in_desc.bEndpointAddress =
+                       mtp_fullspeed_in_desc.bEndpointAddress;
+               mtp_highspeed_out_desc.bEndpointAddress =
+                       mtp_fullspeed_out_desc.bEndpointAddress;
+       }
+       /* support super speed hardware */
+       if (gadget_is_superspeed(c->cdev->gadget)) {
+               unsigned max_burst;
+
+               /* Calculate bMaxBurst, we know packet size is 1024 */
+               max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
+               mtp_ss_in_desc.bEndpointAddress =
+                       mtp_fullspeed_in_desc.bEndpointAddress;
+               mtp_ss_in_comp_desc.bMaxBurst = max_burst;
+               mtp_ss_out_desc.bEndpointAddress =
+                       mtp_fullspeed_out_desc.bEndpointAddress;
+               mtp_ss_out_comp_desc.bMaxBurst = max_burst;
+       }
+
+       DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+               gadget_is_superspeed(c->cdev->gadget) ? "super" :
+               (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
+               f->name, dev->ep_in->name, dev->ep_out->name);
+       return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_request *req;
+       int i;
+
+       mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+       while ((req = mtp_req_get(dev, &dev->tx_idle)))
+               mtp_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               mtp_request_free(dev->rx_req[i], dev->ep_out);
+       while ((req = mtp_req_get(dev, &dev->intr_idle)))
+               mtp_request_free(req, dev->ep_intr);
+       dev->state = STATE_OFFLINE;
+       kfree(f->os_desc_table);
+       f->os_desc_n = 0;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+               unsigned intf, unsigned alt)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_out);
+       if (ret) {
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_intr);
+       if (ret) {
+               usb_ep_disable(dev->ep_out);
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+       dev->state = STATE_READY;
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+       return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_composite_dev        *cdev = dev->cdev;
+
+       DBG(cdev, "mtp_function_disable\n");
+       dev->state = STATE_OFFLINE;
+       usb_ep_disable(dev->ep_in);
+       usb_ep_disable(dev->ep_out);
+       usb_ep_disable(dev->ep_intr);
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+
+       VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
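+/* Allocate the single mtp_dev, create the work queue used for file
+ * transfers and register the misc device that userspace opens.
+ */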
+static int __mtp_setup(struct mtp_instance *fi_mtp)
+{
+       struct mtp_dev *dev;
+       int ret;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+       if (fi_mtp != NULL)
+               fi_mtp->dev = dev;
+
+       if (!dev)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->lock);
+       init_waitqueue_head(&dev->read_wq);
+       init_waitqueue_head(&dev->write_wq);
+       init_waitqueue_head(&dev->intr_wq);
+       atomic_set(&dev->open_excl, 0);
+       atomic_set(&dev->ioctl_excl, 0);
+       INIT_LIST_HEAD(&dev->tx_idle);
+       INIT_LIST_HEAD(&dev->intr_idle);
+
+       dev->wq = create_singlethread_workqueue("f_mtp");
+       if (!dev->wq) {
+               ret = -ENOMEM;
+               goto err1;
+       }
+       INIT_WORK(&dev->send_file_work, send_file_work);
+       INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+       _mtp_dev = dev;
+
+       ret = misc_register(&mtp_device);
+       if (ret)
+               goto err2;
+
+       return 0;
+
+err2:
+       destroy_workqueue(dev->wq);
+err1:
+       _mtp_dev = NULL;
+       kfree(dev);
+       printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+       return ret;
+}
+
+static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
+{
+       return __mtp_setup(fi_mtp);
+}
+
+
+static void mtp_cleanup(void)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (!dev)
+               return;
+
+       misc_deregister(&mtp_device);
+       destroy_workqueue(dev->wq);
+       _mtp_dev = NULL;
+       kfree(dev);
+}
+
+static struct mtp_instance *to_mtp_instance(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct mtp_instance,
+               func_inst.group);
+}
+
+static void mtp_attr_release(struct config_item *item)
+{
+       struct mtp_instance *fi_mtp = to_mtp_instance(item);
+
+       usb_put_function_instance(&fi_mtp->func_inst);
+}
+
+static struct configfs_item_operations mtp_item_ops = {
+       .release        = mtp_attr_release,
+};
+
+static struct config_item_type mtp_func_type = {
+       .ct_item_ops    = &mtp_item_ops,
+       .ct_owner       = THIS_MODULE,
+};
+
+
+static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
+{
+       return container_of(fi, struct mtp_instance, func_inst);
+}
+
+static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+       struct mtp_instance *fi_mtp;
+       char *ptr;
+       int name_len;
+
+       name_len = strlen(name) + 1;
+       if (name_len > MAX_INST_NAME_LEN)
+               return -ENAMETOOLONG;
+
+       ptr = kstrndup(name, name_len, GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       fi_mtp = to_fi_mtp(fi);
+       fi_mtp->name = ptr;
+
+       return 0;
+}
+
+static void mtp_free_inst(struct usb_function_instance *fi)
+{
+       struct mtp_instance *fi_mtp;
+
+       fi_mtp = to_fi_mtp(fi);
+       kfree(fi_mtp->name);
+       mtp_cleanup();
+       kfree(fi_mtp->mtp_os_desc.group.default_groups);
+       kfree(fi_mtp);
+}
+
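+/* Instance allocation shared by MTP and PTP: only the MTP instance
+ * (mtp_config == true) creates the underlying mtp_dev; a PTP instance
+ * reuses the device already set up by the MTP function.
+ */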
+struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
+{
+       struct mtp_instance *fi_mtp;
+       int ret = 0;
+       struct usb_os_desc *descs[1];
+       char *names[1];
+
+       fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
+       if (!fi_mtp)
+               return ERR_PTR(-ENOMEM);
+       fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
+       fi_mtp->func_inst.free_func_inst = mtp_free_inst;
+
+       fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
+       INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
+       descs[0] = &fi_mtp->mtp_os_desc;
+       names[0] = "MTP";
+       usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
+                                       descs, names, THIS_MODULE);
+
+       if (mtp_config) {
+               ret = mtp_setup_configfs(fi_mtp);
+               if (ret) {
+                       kfree(fi_mtp);
+                       pr_err("Error setting up MTP\n");
+                       return ERR_PTR(ret);
+               }
+       } else
+               fi_mtp->dev = _mtp_dev;
+
+       config_group_init_type_name(&fi_mtp->func_inst.group,
+                                       "", &mtp_func_type);
+
+       return  &fi_mtp->func_inst;
+}
+EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
+
+static struct usb_function_instance *mtp_alloc_inst(void)
+{
+       return alloc_inst_mtp_ptp(true);
+}
+
+static int mtp_ctrlreq_configfs(struct usb_function *f,
+                               const struct usb_ctrlrequest *ctrl)
+{
+       return mtp_ctrlrequest(f->config->cdev, ctrl);
+}
+
+static void mtp_free(struct usb_function *f)
+{
+       /* NO-OP: no function-specific resource allocation in mtp_alloc */
+}
+
+struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
+                                       bool mtp_config)
+{
+       struct mtp_instance *fi_mtp = to_fi_mtp(fi);
+       struct mtp_dev *dev;
+
+       /*
+        * PTP piggybacks on MTP function so make sure we have
+        * created MTP function before we associate this PTP
+        * function with a gadget configuration.
+        */
+       if (fi_mtp->dev == NULL) {
+               pr_err("Error: Create MTP function before linking"
+                               " PTP function with a gadget configuration\n");
+               pr_err("\t1: Delete existing PTP function if any\n");
+               pr_err("\t2: Create MTP function\n");
+               pr_err("\t3: Create and symlink PTP function"
+                               " with a gadget configuration\n");
+               return ERR_PTR(-EINVAL); /* Invalid Configuration */
+       }
+
+       dev = fi_mtp->dev;
+       dev->function.name = DRIVER_NAME;
+       dev->function.strings = mtp_strings;
+       if (mtp_config) {
+               dev->function.fs_descriptors = fs_mtp_descs;
+               dev->function.hs_descriptors = hs_mtp_descs;
+               dev->function.ss_descriptors = ss_mtp_descs;
+       } else {
+               dev->function.fs_descriptors = fs_ptp_descs;
+               dev->function.hs_descriptors = hs_ptp_descs;
+               dev->function.ss_descriptors = ss_ptp_descs;
+       }
+       dev->function.bind = mtp_function_bind;
+       dev->function.unbind = mtp_function_unbind;
+       dev->function.set_alt = mtp_function_set_alt;
+       dev->function.disable = mtp_function_disable;
+       dev->function.setup = mtp_ctrlreq_configfs;
+       dev->function.free_func = mtp_free;
+
+       return &dev->function;
+}
+EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
+
+static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
+{
+       return function_alloc_mtp_ptp(fi, true);
+}
+
+DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h
new file mode 100644 (file)
index 0000000..7adb1ff
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config);
+extern struct usb_function *function_alloc_mtp_ptp(
+                       struct usb_function_instance *fi, bool mtp_config);
diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c
new file mode 100644 (file)
index 0000000..da3e4d5
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Gadget Function Driver for PTP
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "f_mtp.h"
+
+static struct usb_function_instance *ptp_alloc_inst(void)
+{
+       return alloc_inst_mtp_ptp(false);
+}
+
+static struct usb_function *ptp_alloc(struct usb_function_instance *fi)
+{
+       return function_alloc_mtp_ptp(fi, false);
+}
+
+DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Badhri Jagan Sridharan");
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e587767e374cbc2fbae3b02879b5358c38d48e69..3a28d8ac3b3d76e7d36638996102e3f5d01e8d14 100644 (file)
  *   - MS-Windows drivers sometimes emit undocumented requests.
  */
 
+static unsigned int rndis_dl_max_pkt_per_xfer = 3;
+module_param(rndis_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
+       "Maximum packets per transfer for DL aggregation");
+
+static unsigned int rndis_ul_max_pkt_per_xfer = 3;
+module_param(rndis_ul_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer,
+       "Maximum packets per transfer for UL aggregation");
+
 struct f_rndis {
        struct gether                   port;
        u8                              ctrl_id, data_id;
@@ -449,7 +459,9 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
 static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct f_rndis                  *rndis = req->context;
+       struct usb_composite_dev        *cdev = rndis->port.func.config->cdev;
        int                             status;
+       rndis_init_msg_type             *buf;
 
        /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //     spin_lock(&dev->lock);
@@ -457,6 +469,21 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
        if (status < 0)
                pr_err("RNDIS command error %d, %d/%d\n",
                        status, req->actual, req->length);
+
+       buf = (rndis_init_msg_type *)req->buf;
+
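+       /* On REMOTE_NDIS_INITIALIZE_MSG the host reports its MaxTransferSize,
+        * which tells us whether it can accept aggregated (multi-packet)
+        * downlink transfers.
+        */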
+       if (buf->MessageType == RNDIS_MSG_INIT) {
+               if (buf->MaxTransferSize > 2048)
+                       rndis->port.multi_pkt_xfer = 1;
+               else
+                       rndis->port.multi_pkt_xfer = 0;
+               DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
+                               __func__, buf->MaxTransferSize,
+                               rndis->port.multi_pkt_xfer ? "enabled" :
+                                                           "disabled");
+               if (rndis_dl_max_pkt_per_xfer <= 1)
+                       rndis->port.multi_pkt_xfer = 0;
+       }
 //     spin_unlock(&dev->lock);
 }
 
@@ -792,6 +819,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
 
        rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
        rndis_set_host_mac(rndis->params, rndis->ethaddr);
+       rndis_set_max_pkt_xfer(rndis->params, rndis_ul_max_pkt_per_xfer);
 
        if (rndis->manufacturer && rndis->vendorID &&
                        rndis_set_param_vendor(rndis->params, rndis->vendorID,
@@ -978,6 +1006,8 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
        rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
        rndis->port.wrap = rndis_add_header;
        rndis->port.unwrap = rndis_rm_hdr;
+       rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
+       rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
 
        rndis->port.func.name = "rndis";
        /* descriptors are per-instance copies */
index 70d3917cc00364e71ca0900557c09fd02ce56bc3..2ec7171b3f0442d44153efae7965a220f0638a05 100644 (file)
 
 #include "rndis.h"
 
+int rndis_ul_max_pkt_per_xfer_rcvd;
+module_param(rndis_ul_max_pkt_per_xfer_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer_rcvd,
+               "Max num of REMOTE_NDIS_PACKET_MSGs received in a single transfer");
+
+int rndis_ul_max_xfer_size_rcvd;
+module_param(rndis_ul_max_xfer_size_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_xfer_size_rcvd,
+               "Max size of bus transfer received");
+
 
 /* The driver for your USB chip needs to support ep0 OUT to work with
  * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
@@ -579,12 +589,12 @@ static int rndis_init_response(struct rndis_params *params,
        resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
        resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
        resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-       resp->MaxPacketsPerTransfer = cpu_to_le32(1);
-       resp->MaxTransferSize = cpu_to_le32(
-                 params->dev->mtu
+       resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+       resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
+               (params->dev->mtu
                + sizeof(struct ethhdr)
                + sizeof(struct rndis_packet_msg_type)
-               + 22);
+               + 22));
        resp->PacketAlignmentFactor = cpu_to_le32(0);
        resp->AFListOffset = cpu_to_le32(0);
        resp->AFListSize = cpu_to_le32(0);
@@ -681,6 +691,13 @@ static int rndis_reset_response(struct rndis_params *params,
        rndis_reset_cmplt_type *resp;
        rndis_resp_t *r;
 
+       u32 length;
+       u8 *xbuf;
+
+       /* drain the response queue */
+       while ((xbuf = rndis_get_next_response(params, &length)))
+               rndis_free_response(params, xbuf);
+
        r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
        if (!r)
                return -ENOMEM;
@@ -957,6 +974,8 @@ int rndis_set_param_dev(struct rndis_params *params, struct net_device *dev,
        params->dev = dev;
        params->filter = cdc_filter;
 
+       rndis_ul_max_xfer_size_rcvd = 0;
+       rndis_ul_max_pkt_per_xfer_rcvd = 0;
        return 0;
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_dev);
@@ -989,6 +1008,13 @@ int rndis_set_param_medium(struct rndis_params *params, u32 medium, u32 speed)
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_medium);
 
+void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
+{
+       pr_debug("%s:\n", __func__);
+
+       params->max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
        struct rndis_packet_msg_type *header;
@@ -1061,23 +1087,73 @@ int rndis_rm_hdr(struct gether *port,
                        struct sk_buff *skb,
                        struct sk_buff_head *list)
 {
-       /* tmp points to a struct rndis_packet_msg_type */
-       __le32 *tmp = (void *)skb->data;
+       int num_pkts = 1;
 
-       /* MessageType, MessageLength */
-       if (cpu_to_le32(RNDIS_MSG_PACKET)
-                       != get_unaligned(tmp++)) {
-               dev_kfree_skb_any(skb);
-               return -EINVAL;
-       }
-       tmp++;
+       if (skb->len > rndis_ul_max_xfer_size_rcvd)
+               rndis_ul_max_xfer_size_rcvd = skb->len;
+
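+       /* A single UL transfer may carry several REMOTE_NDIS_PACKET_MSGs.
+        * Walk the buffer, validate each header, and queue every packet's
+        * payload as its own skb; the last packet reuses the original skb.
+        */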
+       while (skb->len) {
+               struct rndis_packet_msg_type *hdr;
+               struct sk_buff          *skb2;
+               u32             msg_len, data_offset, data_len;
 
-       /* DataOffset, DataLength */
-       if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
-               dev_kfree_skb_any(skb);
-               return -EOVERFLOW;
+               /* some rndis hosts send extra byte to avoid zlp, ignore it */
+               if (skb->len == 1) {
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
+
+               if (skb->len < sizeof *hdr) {
+                       pr_err("invalid rndis pkt: skblen:%u hdr_len:%zu\n",
+                                       skb->len, sizeof *hdr);
+                       dev_kfree_skb_any(skb);
+                       return -EINVAL;
+               }
+
+               hdr = (void *)skb->data;
+               msg_len = le32_to_cpu(hdr->MessageLength);
+               data_offset = le32_to_cpu(hdr->DataOffset);
+               data_len = le32_to_cpu(hdr->DataLength);
+
+               if (skb->len < msg_len ||
+                               ((data_offset + data_len + 8) > msg_len)) {
+                       pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+                                       le32_to_cpu(hdr->MessageType),
+                                       msg_len, data_offset, data_len, skb->len);
+                       dev_kfree_skb_any(skb);
+                       return -EOVERFLOW;
+               }
+               if (le32_to_cpu(hdr->MessageType) != RNDIS_MSG_PACKET) {
+                       pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+                                       le32_to_cpu(hdr->MessageType),
+                                       msg_len, data_offset, data_len, skb->len);
+                       dev_kfree_skb_any(skb);
+                       return -EINVAL;
+               }
+
+               skb_pull(skb, data_offset + 8);
+
+               if (msg_len == skb->len) {
+                       skb_trim(skb, data_len);
+                       break;
+               }
+
+               skb2 = skb_clone(skb, GFP_ATOMIC);
+               if (!skb2) {
+                       pr_err("%s:skb clone failed\n", __func__);
+                       dev_kfree_skb_any(skb);
+                       return -ENOMEM;
+               }
+
+               skb_pull(skb, msg_len - sizeof *hdr);
+               skb_trim(skb2, data_len);
+               skb_queue_tail(list, skb2);
+
+               num_pkts++;
        }
-       skb_trim(skb, get_unaligned_le32(tmp++));
+
+       if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
+               rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;
 
        skb_queue_tail(list, skb);
        return 0;
index ef92eb66d8adf91fac1cc7dea85490ebc2f0895a..310cac3f088e8297dcf7137991af9a7961f85158 100644 (file)
@@ -190,6 +190,7 @@ typedef struct rndis_params
        struct net_device       *dev;
 
        u32                     vendorID;
+       u8                      max_pkt_per_xfer;
        const char              *vendorDescr;
        void                    (*resp_avail)(void *v);
        void                    *v;
@@ -206,6 +207,7 @@ int  rndis_set_param_vendor(struct rndis_params *params, u32 vendorID,
                            const char *vendorDescr);
 int  rndis_set_param_medium(struct rndis_params *params, u32 medium,
                             u32 speed);
+void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
 void rndis_add_hdr(struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
                        struct sk_buff_head *list);
index 7413f89660f7307d3ed8b83887cbe1a82fe1c891..e4920e5e1d647ff92c5118545309f1eb99396c36 100644 (file)
@@ -53,6 +53,8 @@
  * blocks and still have efficient handling. */
 #define GETHER_MAX_ETH_FRAME_LEN 15412
 
+static struct workqueue_struct *uether_wq;
+
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -64,19 +66,27 @@ struct eth_dev {
 
        spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
        struct list_head        tx_reqs, rx_reqs;
-       atomic_t                tx_qlen;
+       unsigned                tx_qlen;
+/* Minimum number of TX USB requests kept queued to the UDC before
+ * frames are held back for aggregation */
+#define TX_REQ_THRESHOLD       5
+       int                     no_tx_req_used;
+       int                     tx_skb_hold_count;
+       u32                     tx_req_bufsize;
 
        struct sk_buff_head     rx_frames;
 
        unsigned                qmult;
 
        unsigned                header_len;
+       unsigned                ul_max_pkts_per_xfer;
+       unsigned                dl_max_pkts_per_xfer;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
+       struct work_struct      rx_work;
 
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
@@ -230,9 +240,13 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
 
+       if (dev->ul_max_pkts_per_xfer)
+               size *= dev->ul_max_pkts_per_xfer;
+
        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+       DBG(dev, "%s: size: %zd\n", __func__, size);
        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
                DBG(dev, "no rx skb\n");
@@ -258,18 +272,16 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
-               spin_lock_irqsave(&dev->req_lock, flags);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context, *skb2;
+       struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
+       bool            queue = 0;
 
        switch (status) {
 
@@ -285,6 +297,10 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
+                               if (status == -EINVAL)
+                                       dev->net->stats.rx_errors++;
+                               else if (status == -EOVERFLOW)
+                                       dev->net->stats.rx_over_errors++;
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
@@ -293,30 +309,8 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               skb = NULL;
-
-               skb2 = skb_dequeue(&dev->rx_frames);
-               while (skb2) {
-                       if (status < 0
-                                       || ETH_HLEN > skb2->len
-                                       || skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
-                               dev->net->stats.rx_errors++;
-                               dev->net->stats.rx_length_errors++;
-                               DBG(dev, "rx length %d\n", skb2->len);
-                               dev_kfree_skb_any(skb2);
-                               goto next_frame;
-                       }
-                       skb2->protocol = eth_type_trans(skb2, dev->net);
-                       dev->net->stats.rx_packets++;
-                       dev->net->stats.rx_bytes += skb2->len;
-
-                       /* no buffer copies needed, unless hardware can't
-                        * use skb buffers.
-                        */
-                       status = netif_rx(skb2);
-next_frame:
-                       skb2 = skb_dequeue(&dev->rx_frames);
-               }
+               if (!status)
+                       queue = 1;
                break;
 
        /* software-driven interface shutdown */
@@ -339,22 +333,20 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
+               queue = 1;
+               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
-       if (skb)
-               dev_kfree_skb_any(skb);
-       if (!netif_running(dev->net)) {
 clean:
-               spin_lock(&dev->req_lock);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock(&dev->req_lock);
-               req = NULL;
-       }
-       if (req)
-               rx_submit(dev, req, GFP_ATOMIC);
+       spin_lock(&dev->req_lock);
+       list_add(&req->list, &dev->rx_reqs);
+       spin_unlock(&dev->req_lock);
+
+       if (queue)
+               queue_work(uether_wq, &dev->rx_work);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -419,16 +411,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
+       int                     req_cnt = 0;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
+               /* break the cycle of continuous completion and re-submission */
+               if (++req_cnt > qlen(dev->gadget, dev->qmult))
+                       break;
+
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
+                       spin_lock_irqsave(&dev->req_lock, flags);
+                       list_add(&req->list, &dev->rx_reqs);
+                       spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
@@ -438,6 +438,36 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static void process_rx_w(struct work_struct *work)
+{
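+       /* Runs on the uether workqueue: hand each de-aggregated frame to
+        * the network stack in process context, then refill the RX queue.
+        */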
+       struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work);
+       struct sk_buff  *skb;
+       int             status = 0;
+
+       if (!dev->port_usb)
+               return;
+
+       while ((skb = skb_dequeue(&dev->rx_frames))) {
+               if (status < 0
+                               || ETH_HLEN > skb->len
+                               || skb->len > ETH_FRAME_LEN) {
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       DBG(dev, "rx length %d\n", skb->len);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+               skb->protocol = eth_type_trans(skb, dev->net);
+               dev->net->stats.rx_packets++;
+               dev->net->stats.rx_bytes += skb->len;
+
+               status = netif_rx_ni(skb);
+       }
+
+       if (netif_running(dev->net))
+               rx_fill(dev, GFP_KERNEL);
+}
+
 static void eth_work(struct work_struct *work)
 {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -455,6 +485,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
+       struct net_device *net = dev->net;
+       struct usb_request *new_req;
+       struct usb_ep *in;
+       int length;
+       int retval;
 
        switch (req->status) {
        default:
@@ -465,16 +500,74 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
-               dev->net->stats.tx_bytes += skb->len;
+               if (!req->zero)
+                       dev->net->stats.tx_bytes += req->length-1;
+               else
+                       dev->net->stats.tx_bytes += req->length;
        }
        dev->net->stats.tx_packets++;
 
        spin_lock(&dev->req_lock);
-       list_add(&req->list, &dev->tx_reqs);
-       spin_unlock(&dev->req_lock);
-       dev_kfree_skb_any(skb);
+       list_add_tail(&req->list, &dev->tx_reqs);
+
+       if (dev->port_usb->multi_pkt_xfer) {
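+               /* Aggregated mode: reset this request's length for reuse
+                * and, if the next free request already has frames
+                * accumulated, queue it to the IN endpoint immediately.
+                */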
+               dev->no_tx_req_used--;
+               req->length = 0;
+               in = dev->port_usb->in_ep;
+
+               if (!list_empty(&dev->tx_reqs)) {
+                       new_req = container_of(dev->tx_reqs.next,
+                                       struct usb_request, list);
+                       list_del(&new_req->list);
+                       spin_unlock(&dev->req_lock);
+                       if (new_req->length > 0) {
+                               length = new_req->length;
+
+                               /* NCM requires no zlp if transfer is
+                                * dwNtbInMaxSize */
+                               if (dev->port_usb->is_fixed &&
+                                       length == dev->port_usb->fixed_in_len &&
+                                       (length % in->maxpacket) == 0)
+                                       new_req->zero = 0;
+                               else
+                                       new_req->zero = 1;
+
+                               /* use zlp framing on tx for strict CDC-Ether
+                                * conformance, though any robust network rx
+                                * path ignores extra padding. and some hardware
+                                * doesn't like to write zlps.
+                                */
+                               if (new_req->zero && !dev->zlp &&
+                                               (length % in->maxpacket) == 0) {
+                                       new_req->zero = 0;
+                                       length++;
+                               }
+
+                               new_req->length = length;
+                               retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+                               switch (retval) {
+                               default:
+                                       DBG(dev, "tx queue err %d\n", retval);
+                                       break;
+                               case 0:
+                                       spin_lock(&dev->req_lock);
+                                       dev->no_tx_req_used++;
+                                       spin_unlock(&dev->req_lock);
+                                       net->trans_start = jiffies;
+                               }
+                       } else {
+                               spin_lock(&dev->req_lock);
+                               list_add(&new_req->list, &dev->tx_reqs);
+                               spin_unlock(&dev->req_lock);
+                       }
+               } else {
+                       spin_unlock(&dev->req_lock);
+               }
+       } else {
+               spin_unlock(&dev->req_lock);
+               dev_kfree_skb_any(skb);
+       }
 
-       atomic_dec(&dev->tx_qlen);
        if (netif_carrier_ok(dev->net))
                netif_wake_queue(dev->net);
 }
@@ -484,6 +577,26 @@ static inline int is_promisc(u16 cdc_filter)
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
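+       /* Give every TX request a buffer big enough to aggregate
+        * dl_max_pkts_per_xfer frames, each up to MTU + Ethernet header +
+        * RNDIS packet header plus a little headroom.
+        */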
+       struct list_head        *act;
+       struct usb_request      *req;
+
+       dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+                               (dev->net->mtu
+                               + sizeof(struct ethhdr)
+                               /* size of rndis_packet_msg_type */
+                               + 44
+                               + 22));
+
+       list_for_each(act, &dev->tx_reqs) {
+               req = container_of(act, struct usb_request, list);
+               if (!req->buf)
+                       req->buf = kmalloc(dev->tx_req_bufsize,
+                                               GFP_ATOMIC);
+       }
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
 {
@@ -510,6 +623,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* Allocate memory for tx_reqs to support multi packet transfer */
+       if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+               alloc_tx_buffer(dev);
+
        /* apply outgoing CDC or RNDIS filters */
        if (skb && !is_promisc(cdc_filter)) {
                u8              *dest = skb->data;
@@ -572,9 +689,37 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                }
        }
 
-       length = skb->len;
-       req->buf = skb->data;
-       req->context = skb;
+       spin_lock_irqsave(&dev->req_lock, flags);
+       dev->tx_skb_hold_count++;
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+
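+       /* In aggregated mode, copy the frame into the request buffer and
+        * free the skb.  While the buffer still has room for more frames
+        * and more than TX_REQ_THRESHOLD requests are already in flight,
+        * park the request and keep accumulating; otherwise submit it now.
+        */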
+       if (dev->port_usb->multi_pkt_xfer) {
+               memcpy(req->buf + req->length, skb->data, skb->len);
+               req->length = req->length + skb->len;
+               length = req->length;
+               dev_kfree_skb_any(skb);
+
+               spin_lock_irqsave(&dev->req_lock, flags);
+               if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
+                       if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+                               list_add(&req->list, &dev->tx_reqs);
+                               spin_unlock_irqrestore(&dev->req_lock, flags);
+                               goto success;
+                       }
+               }
+
+               dev->no_tx_req_used++;
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+
+               spin_lock_irqsave(&dev->lock, flags);
+               dev->tx_skb_hold_count = 0;
+               spin_unlock_irqrestore(&dev->lock, flags);
+       } else {
+               length = skb->len;
+               req->buf = skb->data;
+               req->context = skb;
+       }
+
        req->complete = tx_complete;
 
        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -589,11 +734,28 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
-       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+               req->zero = 0;
                length++;
+       }
 
        req->length = length;
 
+       /* throttle highspeed IRQ rate back slightly */
+       if (gadget_is_dualspeed(dev->gadget) &&
+                        (dev->gadget->speed == USB_SPEED_HIGH) &&
+                        !list_empty(&dev->tx_reqs)) {
+               dev->tx_qlen++;
+               if (dev->tx_qlen == (dev->qmult/2)) {
+                       req->no_interrupt = 0;
+                       dev->tx_qlen = 0;
+               } else {
+                       req->no_interrupt = 1;
+               }
+       } else {
+               req->no_interrupt = 0;
+       }
+
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
@@ -601,11 +763,11 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                break;
        case 0:
                net->trans_start = jiffies;
-               atomic_inc(&dev->tx_qlen);
        }
 
        if (retval) {
-               dev_kfree_skb_any(skb);
+               if (!dev->port_usb->multi_pkt_xfer)
+                       dev_kfree_skb_any(skb);
 drop:
                dev->net->stats.tx_dropped++;
 multiframe:
@@ -615,6 +777,7 @@ multiframe:
                list_add(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
+success:
        return NETDEV_TX_OK;
 }
 
@@ -628,7 +791,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
        rx_fill(dev, gfp_flags);
 
        /* and open the tx floodgates */
-       atomic_set(&dev->tx_qlen, 0);
+       dev->tx_qlen = 0;
        netif_wake_queue(dev->net);
 }
 
@@ -774,6 +937,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -836,6 +1000,7 @@ struct net_device *gether_setup_name_default(const char *netname)
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -1070,8 +1235,13 @@ struct net_device *gether_connect(struct gether *link)
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;
+               dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+               dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
                spin_lock(&dev->lock);
+               dev->tx_skb_hold_count = 0;
+               dev->no_tx_req_used = 0;
+               dev->tx_req_bufsize = 0;
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
@@ -1116,6 +1286,7 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
+       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
@@ -1138,6 +1309,8 @@ void gether_disconnect(struct gether *link)
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
+               if (link->multi_pkt_xfer)
+                       kfree(req->buf);
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
@@ -1156,6 +1329,12 @@ void gether_disconnect(struct gether *link)
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
+
+       spin_lock(&dev->rx_frames.lock);
+       while ((skb = __skb_dequeue(&dev->rx_frames)))
+               dev_kfree_skb_any(skb);
+       spin_unlock(&dev->rx_frames.lock);
+
        link->out_ep->desc = NULL;
 
        /* finish forgetting about this USB link episode */
@@ -1169,5 +1348,23 @@ void gether_disconnect(struct gether *link)
 }
 EXPORT_SYMBOL_GPL(gether_disconnect);
 
-MODULE_LICENSE("GPL");
+static int __init gether_init(void)
+{
+       uether_wq  = create_singlethread_workqueue("uether");
+       if (!uether_wq) {
+               pr_err("%s: Unable to create workqueue: uether\n", __func__);
+               return -ENOMEM;
+       }
+       return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+       destroy_workqueue(uether_wq);
+}
+module_exit(gether_exit);
 MODULE_AUTHOR("David Brownell");
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");
index c77145bd6b5b94f65b8531f2ab061c2770edbae5..e9fd14b461026e1b878f4d1c45c7ac00d0ad704f 100644 (file)
@@ -73,6 +73,9 @@ struct gether {
        bool                            is_fixed;
        u32                             fixed_out_len;
        u32                             fixed_in_len;
+       unsigned                ul_max_pkts_per_xfer;
+       unsigned                dl_max_pkts_per_xfer;
+       bool                            multi_pkt_xfer;
        bool                            supports_multi_frame;
        struct sk_buff                  *(*wrap)(struct gether *port,
                                                struct sk_buff *skb);
index 22e8ecb6bfbd282236a77525cf4cabda25555c66..795485eac7b07f140d6bc243dc01480e1824729b 100644 (file)
@@ -6,6 +6,14 @@ menu "USB Physical Layer drivers"
 config USB_PHY
        def_bool n
 
+config USB_OTG_WAKELOCK
+       bool "Hold a wakelock when USB connected"
+       depends on WAKELOCK
+       select USB_OTG_UTILS
+       help
+         Select this to automatically hold a wakelock when USB is
+         connected, preventing suspend.
+
 #
 # USB Transceiver Drivers
 #
@@ -213,4 +221,13 @@ config USB_ULPI_VIEWPORT
          Provides read/write operations to the ULPI phy register set for
          controllers with a viewport register (e.g. Chipidea/ARC controllers).
 
+config DUAL_ROLE_USB_INTF
+       bool "Generic DUAL ROLE sysfs interface"
+       depends on SYSFS && USB_PHY
+       help
+         A generic sysfs interface to track and change the state of
+         dual role USB PHYs. USB PHY drivers can register with this
+         interface to expose their capabilities to userspace, thereby
+         allowing userspace to change the port mode.
+
 endmenu
index 19c0dccbb1161b11ba001ee62181027a614be187..f7543f3b99439535135547d76bd020536828677f 100644 (file)
@@ -3,6 +3,8 @@
 #
 obj-$(CONFIG_USB_PHY)                  += phy.o
 obj-$(CONFIG_OF)                       += of.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)         += otg-wakelock.o
+obj-$(CONFIG_DUAL_ROLE_USB_INTF)       += class-dual-role.o
 
 # transceiver drivers, keep the list sorted
 
diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c
new file mode 100644 (file)
index 0000000..51fcb54
--- /dev/null
@@ -0,0 +1,529 @@
+/*
+ * class-dual-role.c
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#define DUAL_ROLE_NOTIFICATION_TIMEOUT 2000
+
+static ssize_t dual_role_store_property(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count);
+static ssize_t dual_role_show_property(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf);
+
+#define DUAL_ROLE_ATTR(_name)                          \
+{                                                      \
+       .attr = { .name = #_name },                     \
+       .show = dual_role_show_property,                \
+       .store = dual_role_store_property,              \
+}
+
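+/* Keep this array in the same order as enum dual_role_property: the sysfs
+ * show/store handlers derive the property index from (attr - dual_role_attrs).
+ */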
+static struct device_attribute dual_role_attrs[] = {
+       DUAL_ROLE_ATTR(supported_modes),
+       DUAL_ROLE_ATTR(mode),
+       DUAL_ROLE_ATTR(power_role),
+       DUAL_ROLE_ATTR(data_role),
+       DUAL_ROLE_ATTR(powers_vconn),
+};
+
+struct class *dual_role_class;
+EXPORT_SYMBOL_GPL(dual_role_class);
+
+static struct device_type dual_role_dev_type;
+
+static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper)
+{
+       char *ret, *ustr;
+
+       ustr = ret = kmalloc(strlen(str) + 1, gfp);
+
+       if (!ret)
+               return NULL;
+
+       while (*str)
+               *ustr++ = to_upper ? toupper(*str++) : tolower(*str++);
+
+       *ustr = 0;
+
+       return ret;
+}
+
+static void dual_role_changed_work(struct work_struct *work)
+{
+       struct dual_role_phy_instance *dual_role =
+           container_of(work, struct dual_role_phy_instance,
+                        changed_work);
+
+       dev_dbg(&dual_role->dev, "%s\n", __func__);
+       kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
+}
+
+void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
+{
+       dev_dbg(&dual_role->dev, "%s\n", __func__);
+       pm_wakeup_event(&dual_role->dev, DUAL_ROLE_NOTIFICATION_TIMEOUT);
+       schedule_work(&dual_role->changed_work);
+}
+EXPORT_SYMBOL_GPL(dual_role_instance_changed);
+
+int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+                          enum dual_role_property prop,
+                          unsigned int *val)
+{
+       return dual_role->desc->get_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_get_property);
+
+int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+                          enum dual_role_property prop,
+                          const unsigned int *val)
+{
+       if (!dual_role->desc->set_property)
+               return -ENODEV;
+
+       return dual_role->desc->set_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_set_property);
+
+int dual_role_property_is_writeable(struct dual_role_phy_instance *dual_role,
+                                   enum dual_role_property prop)
+{
+       if (!dual_role->desc->property_is_writeable)
+               return -ENODEV;
+
+       return dual_role->desc->property_is_writeable(dual_role, prop);
+}
+EXPORT_SYMBOL_GPL(dual_role_property_is_writeable);
+
+static void dual_role_dev_release(struct device *dev)
+{
+       struct dual_role_phy_instance *dual_role =
+           container_of(dev, struct dual_role_phy_instance, dev);
+       pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+       kfree(dual_role);
+}
+
+static struct dual_role_phy_instance *__must_check
+__dual_role_register(struct device *parent,
+                    const struct dual_role_phy_desc *desc)
+{
+       struct device *dev;
+       struct dual_role_phy_instance *dual_role;
+       int rc;
+
+       dual_role = kzalloc(sizeof(*dual_role), GFP_KERNEL);
+       if (!dual_role)
+               return ERR_PTR(-ENOMEM);
+
+       dev = &dual_role->dev;
+
+       device_initialize(dev);
+
+       dev->class = dual_role_class;
+       dev->type = &dual_role_dev_type;
+       dev->parent = parent;
+       dev->release = dual_role_dev_release;
+       dev_set_drvdata(dev, dual_role);
+       dual_role->desc = desc;
+
+       rc = dev_set_name(dev, "%s", desc->name);
+       if (rc)
+               goto dev_set_name_failed;
+
+       INIT_WORK(&dual_role->changed_work, dual_role_changed_work);
+
+       rc = device_init_wakeup(dev, true);
+       if (rc)
+               goto wakeup_init_failed;
+
+       rc = device_add(dev);
+       if (rc)
+               goto device_add_failed;
+
+       dual_role_instance_changed(dual_role);
+
+       return dual_role;
+
+device_add_failed:
+       device_init_wakeup(dev, false);
+wakeup_init_failed:
+dev_set_name_failed:
+       put_device(dev);
+       /* dual_role is freed by dual_role_dev_release() via put_device() */
+
+       return ERR_PTR(rc);
+}
+
+static void dual_role_instance_unregister(struct dual_role_phy_instance
+                                         *dual_role)
+{
+       cancel_work_sync(&dual_role->changed_work);
+       device_init_wakeup(&dual_role->dev, false);
+       device_unregister(&dual_role->dev);
+}
+
+static void devm_dual_role_release(struct device *dev, void *res)
+{
+       struct dual_role_phy_instance **dual_role = res;
+
+       dual_role_instance_unregister(*dual_role);
+}
+
+struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+                                const struct dual_role_phy_desc *desc)
+{
+       struct dual_role_phy_instance **ptr, *dual_role;
+
+       ptr = devres_alloc(devm_dual_role_release, sizeof(*ptr), GFP_KERNEL);
+
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+       dual_role = __dual_role_register(parent, desc);
+       if (IS_ERR(dual_role)) {
+               devres_free(ptr);
+       } else {
+               *ptr = dual_role;
+               devres_add(parent, ptr);
+       }
+       return dual_role;
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_register);
+
+static int devm_dual_role_match(struct device *dev, void *res, void *data)
+{
+       struct dual_role_phy_instance **r = res;
+
+       if (WARN_ON(!r || !*r))
+               return 0;
+
+       return *r == data;
+}
+
+void devm_dual_role_instance_unregister(struct device *dev,
+                                       struct dual_role_phy_instance
+                                       *dual_role)
+{
+       int rc;
+
+       rc = devres_release(dev, devm_dual_role_release,
+                           devm_dual_role_match, dual_role);
+       WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_unregister);
+
+void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role)
+{
+       return dual_role->drv_data;
+}
+EXPORT_SYMBOL_GPL(dual_role_get_drvdata);
+
+/***************** Device attribute functions **************************/
+
+/* port type */
+static char *supported_modes_text[] = {
+       "ufp dfp", "dfp", "ufp"
+};
+
+/* current mode */
+static char *mode_text[] = {
+       "ufp", "dfp", "none"
+};
+
+/* Power role */
+static char *pr_text[] = {
+       "source", "sink", "none"
+};
+
+/* Data role */
+static char *dr_text[] = {
+       "host", "device", "none"
+};
+
+/* Vconn supply */
+static char *vconn_supply_text[] = {
+       "n", "y"
+};
+
+static ssize_t dual_role_show_property(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       ssize_t ret = 0;
+       struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+       const ptrdiff_t off = attr - dual_role_attrs;
+       unsigned int value;
+
+       if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+               value = dual_role->desc->supported_modes;
+       } else {
+               ret = dual_role_get_property(dual_role, off, &value);
+
+               if (ret < 0) {
+                       if (ret == -ENODATA)
+                               dev_dbg(dev,
+                                       "driver has no data for `%s' property\n",
+                                       attr->attr.name);
+                       else if (ret != -ENODEV)
+                               dev_err(dev,
+                                       "driver failed to report `%s' property: %zd\n",
+                                       attr->attr.name, ret);
+                       return ret;
+               }
+       }
+
+       if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+               BUILD_BUG_ON(DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL !=
+                       ARRAY_SIZE(supported_modes_text));
+               if (value < DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL)
+                       return snprintf(buf, PAGE_SIZE, "%s\n",
+                                       supported_modes_text[value]);
+               else
+                       return -EIO;
+       } else if (off == DUAL_ROLE_PROP_MODE) {
+               BUILD_BUG_ON(DUAL_ROLE_PROP_MODE_TOTAL !=
+                       ARRAY_SIZE(mode_text));
+               if (value < DUAL_ROLE_PROP_MODE_TOTAL)
+                       return snprintf(buf, PAGE_SIZE, "%s\n",
+                                       mode_text[value]);
+               else
+                       return -EIO;
+       } else if (off == DUAL_ROLE_PROP_PR) {
+               BUILD_BUG_ON(DUAL_ROLE_PROP_PR_TOTAL != ARRAY_SIZE(pr_text));
+               if (value < DUAL_ROLE_PROP_PR_TOTAL)
+                       return snprintf(buf, PAGE_SIZE, "%s\n",
+                                       pr_text[value]);
+               else
+                       return -EIO;
+       } else if (off == DUAL_ROLE_PROP_DR) {
+               BUILD_BUG_ON(DUAL_ROLE_PROP_DR_TOTAL != ARRAY_SIZE(dr_text));
+               if (value < DUAL_ROLE_PROP_DR_TOTAL)
+                       return snprintf(buf, PAGE_SIZE, "%s\n",
+                                       dr_text[value]);
+               else
+                       return -EIO;
+       } else if (off == DUAL_ROLE_PROP_VCONN_SUPPLY) {
+               BUILD_BUG_ON(DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL !=
+                               ARRAY_SIZE(vconn_supply_text));
+               if (value < DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL)
+                       return snprintf(buf, PAGE_SIZE, "%s\n",
+                                       vconn_supply_text[value]);
+               else
+                       return -EIO;
+       } else
+               return -EIO;
+}
+
+static ssize_t dual_role_store_property(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       ssize_t ret;
+       struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+       const ptrdiff_t off = attr - dual_role_attrs;
+       unsigned int value;
+       int total, i;
+       char *dup_buf, **text_array;
+       bool result = false;
+
+       dup_buf = kstrdupcase(buf, GFP_KERNEL, false);
+       if (!dup_buf)
+               return -ENOMEM;
+       switch (off) {
+       case DUAL_ROLE_PROP_MODE:
+               total = DUAL_ROLE_PROP_MODE_TOTAL;
+               text_array = mode_text;
+               break;
+       case DUAL_ROLE_PROP_PR:
+               total = DUAL_ROLE_PROP_PR_TOTAL;
+               text_array = pr_text;
+               break;
+       case DUAL_ROLE_PROP_DR:
+               total = DUAL_ROLE_PROP_DR_TOTAL;
+               text_array = dr_text;
+               break;
+       case DUAL_ROLE_PROP_VCONN_SUPPLY:
+               ret = strtobool(dup_buf, &result);
+               value = result;
+               if (!ret)
+                       goto setprop;
+       default:
+               ret = -EINVAL;
+               goto error;
+       }
+
+       for (i = 0; i <= total; i++) {
+               if (i == total) {
+                       ret = -ENOTSUPP;
+                       goto error;
+               }
+               if (!strncmp(*(text_array + i), dup_buf,
+                            strlen(*(text_array + i)))) {
+                       value = i;
+                       break;
+               }
+       }
+
+setprop:
+       ret = dual_role->desc->set_property(dual_role, off, &value);
+
+error:
+       kfree(dup_buf);
+
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static umode_t dual_role_attr_is_visible(struct kobject *kobj,
+                                        struct attribute *attr, int attrno)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+       umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+       int i;
+
+       if (attrno == DUAL_ROLE_PROP_SUPPORTED_MODES)
+               return mode;
+
+       for (i = 0; i < dual_role->desc->num_properties; i++) {
+               int property = dual_role->desc->properties[i];
+
+               if (property == attrno) {
+                       if (dual_role->desc->property_is_writeable &&
+                           dual_role_property_is_writeable(dual_role, property)
+                           > 0)
+                               mode |= S_IWUSR;
+
+                       return mode;
+               }
+       }
+
+       return 0;
+}
+
+static struct attribute *__dual_role_attrs[ARRAY_SIZE(dual_role_attrs) + 1];
+
+static struct attribute_group dual_role_attr_group = {
+       .attrs = __dual_role_attrs,
+       .is_visible = dual_role_attr_is_visible,
+};
+
+static const struct attribute_group *dual_role_attr_groups[] = {
+       &dual_role_attr_group,
+       NULL,
+};
+
+void dual_role_init_attrs(struct device_type *dev_type)
+{
+       int i;
+
+       dev_type->groups = dual_role_attr_groups;
+
+       for (i = 0; i < ARRAY_SIZE(dual_role_attrs); i++)
+               __dual_role_attrs[i] = &dual_role_attrs[i].attr;
+}
+
+int dual_role_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+       int ret = 0, j;
+       char *prop_buf;
+       char *attrname;
+
+       dev_dbg(dev, "uevent\n");
+
+       if (!dual_role || !dual_role->desc) {
+               dev_dbg(dev, "No dual_role phy yet\n");
+               return ret;
+       }
+
+       dev_dbg(dev, "DUAL_ROLE_NAME=%s\n", dual_role->desc->name);
+
+       ret = add_uevent_var(env, "DUAL_ROLE_NAME=%s", dual_role->desc->name);
+       if (ret)
+               return ret;
+
+       prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+       if (!prop_buf)
+               return -ENOMEM;
+
+       for (j = 0; j < dual_role->desc->num_properties; j++) {
+               struct device_attribute *attr;
+               char *line;
+
+               attr = &dual_role_attrs[dual_role->desc->properties[j]];
+
+               ret = dual_role_show_property(dev, attr, prop_buf);
+               if (ret == -ENODEV || ret == -ENODATA) {
+                       ret = 0;
+                       continue;
+               }
+
+               if (ret < 0)
+                       goto out;
+               line = strnchr(prop_buf, PAGE_SIZE, '\n');
+               if (line)
+                       *line = 0;
+
+               attrname = kstrdupcase(attr->attr.name, GFP_KERNEL, true);
+               if (!attrname)
+                       ret = -ENOMEM;
+
+               dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
+
+               ret = add_uevent_var(env, "DUAL_ROLE_%s=%s", attrname,
+                                    prop_buf);
+               kfree(attrname);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       free_page((unsigned long)prop_buf);
+
+       return ret;
+}
+
+/******************* Module Init ***********************************/
+
+static int __init dual_role_class_init(void)
+{
+       dual_role_class = class_create(THIS_MODULE, "dual_role_usb");
+
+       if (IS_ERR(dual_role_class))
+               return PTR_ERR(dual_role_class);
+
+       dual_role_class->dev_uevent = dual_role_uevent;
+       dual_role_init_attrs(&dual_role_dev_type);
+
+       return 0;
+}
+
+static void __exit dual_role_class_exit(void)
+{
+       class_destroy(dual_role_class);
+}
+
+subsys_initcall(dual_role_class_init);
+module_exit(dual_role_class_exit);
diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c
new file mode 100644 (file)
index 0000000..479376b
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME    2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock is used, but its name, wakelock, and held state belong
+ * together, so they are grouped in a struct.
+ */
+
+struct otgwl_lock {
+       char name[40];
+       struct wake_lock wakelock;
+       bool held;
+};
+
+/*
+ * VBUS present lock.  Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+       if (!lock->held) {
+               wake_lock(&lock->wakelock);
+               lock->held = true;
+       }
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+       wake_lock_timeout(&lock->wakelock,
+                         msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+       lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+       if (lock->held) {
+               wake_unlock(&lock->wakelock);
+               lock->held = false;
+       }
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+       if (!enabled) {
+               otgwl_drop(&vbus_lock);
+               spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+               return;
+       }
+
+       switch (event) {
+       case USB_EVENT_VBUS:
+       case USB_EVENT_ENUMERATED:
+               otgwl_hold(&vbus_lock);
+               break;
+
+       case USB_EVENT_NONE:
+       case USB_EVENT_ID:
+       case USB_EVENT_CHARGER:
+               otgwl_temporary_hold(&vbus_lock);
+               break;
+
+       default:
+               break;
+       }
+
+       spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+                                  unsigned long event, void *unused)
+{
+       otgwl_handle_event(event);
+       return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+       int rv = param_set_bool(val, kp);
+
+       if (rv)
+               return rv;
+
+       if (otgwl_xceiv)
+               otgwl_handle_event(otgwl_xceiv->last_event);
+
+       return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+       .set = set_enabled,
+       .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+       int ret;
+       struct usb_phy *phy;
+
+       phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+       if (IS_ERR(phy)) {
+               pr_err("%s: No USB transceiver found\n", __func__);
+               return PTR_ERR(phy);
+       }
+       otgwl_xceiv = phy;
+
+       snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+                dev_name(otgwl_xceiv->dev));
+       wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+                      vbus_lock.name);
+
+       otgwl_nb.notifier_call = otgwl_otg_notifications;
+       ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+       if (ret) {
+               pr_err("%s: usb_register_notifier on transceiver %s failed\n",
+                      __func__, dev_name(otgwl_xceiv->dev));
+               otgwl_xceiv = NULL;
+               wake_lock_destroy(&vbus_lock.wakelock);
+               return ret;
+       }
+
+       otgwl_handle_event(otgwl_xceiv->last_event);
+       return ret;
+}
+
+late_initcall(otg_wakelock_init);
index e0606c01e8ac7166d88d2ad20028c166ebef625e..35d239ebf9125cac6a4d75fa97bc3e51262c5d22 100644 (file)
@@ -29,6 +29,7 @@ source "drivers/video/fbdev/Kconfig"
 endmenu
 
 source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
 
 config VGASTATE
        tristate
index 9ad3c17d645689b79e56364377a71671eb4aac23..1a8c4ced39b29c145bf4c4cfd1fc44cd5ea7d2c7 100644 (file)
@@ -1,6 +1,7 @@
 obj-$(CONFIG_VGASTATE)            += vgastate.o
 obj-$(CONFIG_HDMI)                += hdmi.o
 
+obj-$(CONFIG_ADF)                += adf/
 obj-$(CONFIG_VT)                 += console/
 obj-$(CONFIG_LOGO)               += logo/
 obj-y                            += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644 (file)
index 0000000..2777db4
--- /dev/null
@@ -0,0 +1,14 @@
+menuconfig ADF
+       depends on SYNC
+       depends on DMA_SHARED_BUFFER
+       tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+       depends on ADF
+       depends on FB
+       tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+       depends on ADF
+       depends on HAVE_MEMBLOCK
+       bool "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644 (file)
index 0000000..cdf34a6
--- /dev/null
@@ -0,0 +1,17 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf_core.o
+
+adf_core-y := adf.o \
+       adf_client.o \
+       adf_fops.o \
+       adf_format.o \
+       adf_sysfs.o
+
+adf_core-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644 (file)
index 0000000..42c30c0
--- /dev/null
@@ -0,0 +1,1188 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ * adf_format_validate_yuv modified from framebuffer_check in
+ * drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+       /* sync_fence_wait() dumps debug information on timeout.  Experience
+          has shown that if the pipeline gets stuck, a short timeout followed
+          by a longer one provides useful information for debugging. */
+       int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+       if (err >= 0)
+               return;
+
+       if (err == -ETIME)
+               err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+       if (err < 0)
+               dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+               if (buf->dma_bufs[i])
+                       dma_buf_put(buf->dma_bufs[i]);
+
+       if (buf->acquire_fence)
+               sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+               struct adf_buffer *buf)
+{
+       /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+          uninitialized or partially-initialized, as long as it was
+          zeroed on allocation */
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+               if (mapping->sg_tables[i])
+                       dma_buf_unmap_attachment(mapping->attachments[i],
+                                       mapping->sg_tables[i], DMA_TO_DEVICE);
+               if (mapping->attachments[i])
+                       dma_buf_detach(buf->dma_bufs[i],
+                                       mapping->attachments[i]);
+       }
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+       size_t i;
+
+       if (post->state)
+               dev->ops->state_free(dev, post->state);
+
+       for (i = 0; i < post->config.n_bufs; i++) {
+               adf_buffer_mapping_cleanup(&post->config.mappings[i],
+                               &post->config.bufs[i]);
+               adf_buffer_cleanup(&post->config.bufs[i]);
+       }
+
+       kfree(post->config.custom_data);
+       kfree(post->config.mappings);
+       kfree(post->config.bufs);
+       kfree(post);
+}
+
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+       sw_sync_timeline_inc(dev->timeline, 1);
+#else
+       BUG();
+#endif
+}
+
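+/* Flip-queue worker: for each pending post, wait for every buffer's acquire
+ * fence, hand the configuration to the driver's post() op, advance the sync
+ * timeline, and free the configuration it replaced.
+ */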
+static void adf_post_work_func(struct kthread_work *work)
+{
+       struct adf_device *dev =
+                       container_of(work, struct adf_device, post_work);
+       struct adf_pending_post *post, *next;
+       struct list_head saved_list;
+
+       mutex_lock(&dev->post_lock);
+       memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+       list_replace_init(&dev->post_list, &saved_list);
+       mutex_unlock(&dev->post_lock);
+
+       list_for_each_entry_safe(post, next, &saved_list, head) {
+               int i;
+
+               for (i = 0; i < post->config.n_bufs; i++) {
+                       struct sync_fence *fence =
+                                       post->config.bufs[i].acquire_fence;
+                       if (fence)
+                               adf_fence_wait(dev, fence);
+               }
+
+               dev->ops->post(dev, &post->config, post->state);
+
+               if (dev->ops->advance_timeline)
+                       dev->ops->advance_timeline(dev, &post->config,
+                                       post->state);
+               else
+                       adf_sw_advance_timeline(dev);
+
+               list_del(&post->head);
+               if (dev->onscreen)
+                       adf_post_cleanup(dev, dev->onscreen);
+               dev->onscreen = post;
+       }
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+       list_del(&attachment->head);
+       kfree(attachment);
+}
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+               enum adf_event_type type)
+{
+       struct rb_root *root = &obj->event_refcount;
+       struct rb_node **new = &(root->rb_node);
+       struct rb_node *parent = NULL;
+       struct adf_event_refcount *refcount;
+
+       while (*new) {
+               refcount = container_of(*new, struct adf_event_refcount, node);
+               parent = *new;
+
+               if (refcount->type > type)
+                       new = &(*new)->rb_left;
+               else if (refcount->type < type)
+                       new = &(*new)->rb_right;
+               else
+                       return refcount;
+       }
+
+       refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+       if (!refcount)
+               return NULL;
+       refcount->type = type;
+
+       rb_link_node(&refcount->node, parent, new);
+       rb_insert_color(&refcount->node, root);
+       return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, or -%ENOMEM if the event refcount could not be
+ * allocated.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+       struct adf_event_refcount *refcount;
+       int old_refcount;
+       int ret;
+
+       ret = adf_obj_check_supports_event(obj, type);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&obj->event_lock);
+
+       refcount = adf_obj_find_event_refcount(obj, type);
+       if (!refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       old_refcount = refcount->refcount++;
+
+       if (old_refcount == 0) {
+               obj->ops->set_event(obj, type, true);
+               trace_adf_event_enable(obj, type);
+       }
+
+done:
+       mutex_unlock(&obj->event_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, -%EALREADY if the refcount is already 0, or -%ENOMEM
+ * if the event refcount could not be allocated.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+       struct adf_event_refcount *refcount;
+       int old_refcount;
+       int ret;
+
+       ret = adf_obj_check_supports_event(obj, type);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&obj->event_lock);
+
+       refcount = adf_obj_find_event_refcount(obj, type);
+       if (!refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       old_refcount = refcount->refcount--;
+
+       if (WARN_ON(old_refcount == 0)) {
+               refcount->refcount++;
+               ret = -EALREADY;
+       } else if (old_refcount == 1) {
+               obj->ops->set_event(obj, type, false);
+               trace_adf_event_disable(obj, type);
+       }
+
+done:
+       mutex_unlock(&obj->event_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
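
A minimal usage sketch of the get/put pairing above, assuming a hypothetical client that holds an interface pointer (example_watch_vsync() is invented and not part of this patch):

        static int example_watch_vsync(struct adf_interface *intf)
        {
                int err = adf_event_get(&intf->base, ADF_EVENT_VSYNC);

                if (err < 0)
                        return err;

                /* ... consume vsync events, e.g. via adf_vsync_wait() ... */

                return adf_event_put(&intf->base, ADF_EVENT_VSYNC);
        }
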
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+       ktime_t timestamp;
+       int ret;
+       unsigned long flags;
+
+       read_lock_irqsave(&intf->vsync_lock, flags);
+       timestamp = intf->vsync_timestamp;
+       read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       adf_vsync_get(intf);
+       if (timeout) {
+               ret = wait_event_interruptible_timeout(intf->vsync_wait,
+                               !ktime_equal(timestamp,
+                                               intf->vsync_timestamp),
+                               msecs_to_jiffies(timeout));
+               if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+                       ret = -ETIMEDOUT;
+       } else {
+               ret = wait_event_interruptible(intf->vsync_wait,
+                               !ktime_equal(timestamp,
+                                               intf->vsync_timestamp));
+       }
+       adf_vsync_put(intf);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
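
For example, a hypothetical caller that tolerates one missed frame might wait with a 100 ms budget (sketch only, not part of this patch):

        static int example_wait_one_vsync(struct adf_interface *intf)
        {
                int ret = adf_vsync_wait(intf, 100);

                if (ret == -ETIMEDOUT)
                        dev_warn(&intf->base.dev, "no vsync within 100 ms\n");

                return ret < 0 ? ret : 0;
        }
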
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+       struct adf_file *file;
+       unsigned long flags;
+
+       trace_adf_event(obj, event->type);
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+
+       list_for_each_entry(file, &obj->file_list, head)
+               if (test_bit(event->type, file->event_subscriptions))
+                       adf_file_queue_event(file, event);
+
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context.  It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ *
+ * Returns 0 on success, or -%EINVAL if @event is a vsync or hotplug event.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+       if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+                       event->type == ADF_EVENT_HOTPLUG))
+               return -EINVAL;
+
+       adf_event_queue(obj, event);
+       return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
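
As a sketch of a driver-private event (all example_* names and the underrun semantics are invented, not part of this patch), a driver could report an underrun from its IRQ handler like this:

        #define EXAMPLE_EVENT_UNDERRUN ADF_EVENT_DEVICE_CUSTOM

        struct example_underrun_event {
                struct adf_event base;
                __u32 plane;
        };

        static void example_report_underrun(struct adf_obj *obj, __u32 plane)
        {
                struct example_underrun_event event;

                event.base.type = EXAMPLE_EVENT_UNDERRUN;
                event.base.length = sizeof(event);
                event.plane = plane;

                adf_event_notify(obj, &event.base); /* safe in atomic context */
        }
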
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+       unsigned long flags;
+       struct adf_vsync_event event;
+
+       write_lock_irqsave(&intf->vsync_lock, flags);
+       intf->vsync_timestamp = timestamp;
+       write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       wake_up_interruptible_all(&intf->vsync_wait);
+
+       event.base.type = ADF_EVENT_VSYNC;
+       event.base.length = sizeof(event);
+       event.timestamp = ktime_to_ns(timestamp);
+       adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       unsigned long flags;
+       struct adf_hotplug_event event;
+       struct drm_mode_modeinfo *old_modelist;
+
+       write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       old_modelist = intf->modelist;
+       intf->hotplug_detect = connected;
+       intf->modelist = modelist;
+       intf->n_modes = n_modes;
+       write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       kfree(old_modelist);
+
+       event.base.length = sizeof(event);
+       event.base.type = ADF_EVENT_HOTPLUG;
+       event.connected = connected;
+       adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success, or -%ENOMEM if @n_modes exceeds %ADF_MAX_MODES or the
+ * modelist copy cannot be allocated.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       struct drm_mode_modeinfo *modelist_copy;
+
+       if (n_modes > ADF_MAX_MODES)
+               return -ENOMEM;
+
+       modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+                       GFP_KERNEL);
+       if (!modelist_copy)
+               return -ENOMEM;
+       memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+       adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+       return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
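
A sketch of a hot-plug handler built on the two notifiers above; the single hard-coded 1920x1080 mode and example_handle_hpd() are illustrative only:

        static void example_handle_hpd(struct adf_interface *intf, bool connected)
        {
                if (connected) {
                        struct drm_mode_modeinfo mode = { 0 };

                        mode.hdisplay = 1920;
                        mode.vdisplay = 1080;
                        adf_modeinfo_set_name(&mode);

                        /* the modelist is copied, so a stack variable is fine */
                        adf_hotplug_notify_connected(intf, &mode, 1);
                } else {
                        adf_hotplug_notify_disconnected(intf);
                }
        }
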
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+       adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+               struct idr *idr, struct adf_device *parent,
+               const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+       int ret;
+
+       if (ops && ops->supports_event && !ops->set_event) {
+               pr_err("%s: %s implements supports_event but not set_event\n",
+                               __func__, adf_obj_type_str(type));
+               return -EINVAL;
+       }
+
+       ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+               return ret;
+       }
+       obj->id = ret;
+
+       vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+       obj->type = type;
+       obj->ops = ops;
+       obj->parent = parent;
+       mutex_init(&obj->event_lock);
+       obj->event_refcount = RB_ROOT;
+       spin_lock_init(&obj->file_lock);
+       INIT_LIST_HEAD(&obj->file_list);
+       return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+       struct rb_node *node = rb_first(&obj->event_refcount);
+
+       while (node) {
+               struct adf_event_refcount *refcount =
+                               container_of(node, struct adf_event_refcount,
+                                               node);
+               rb_erase(&refcount->node, &obj->event_refcount);
+               kfree(refcount);
+               node = rb_first(&obj->event_refcount);
+       }
+
+       mutex_destroy(&obj->event_lock);
+       idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+               const struct adf_device_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+
+       if (!ops->validate || !ops->post) {
+               pr_err("%s: device must implement validate and post\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (!ops->complete_fence && !ops->advance_timeline) {
+               if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+                       pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+                                       __func__);
+                       return -EINVAL;
+               }
+       } else if (!(ops->complete_fence && ops->advance_timeline)) {
+               pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       memset(dev, 0, sizeof(*dev));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+                       &ops->base, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       dev->dev = parent;
+       dev->ops = ops;
+       idr_init(&dev->overlay_engines);
+       idr_init(&dev->interfaces);
+       mutex_init(&dev->client_lock);
+       INIT_LIST_HEAD(&dev->post_list);
+       mutex_init(&dev->post_lock);
+       init_kthread_worker(&dev->post_worker);
+       INIT_LIST_HEAD(&dev->attached);
+       INIT_LIST_HEAD(&dev->attach_allowed);
+
+       dev->post_thread = kthread_run(kthread_worker_fn,
+                       &dev->post_worker, dev->base.name);
+       if (IS_ERR(dev->post_thread)) {
+               ret = PTR_ERR(dev->post_thread);
+               dev->post_thread = NULL;
+
+               pr_err("%s: failed to run config posting thread: %d\n",
+                               __func__, ret);
+               goto err;
+       }
+       init_kthread_work(&dev->post_work, adf_post_work_func);
+
+       ret = adf_device_sysfs_init(dev);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+
+err:
+       adf_device_destroy(dev);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
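
A probe-time sketch, assuming a hypothetical driver that relies on sw_sync for completion fences and therefore only implements the mandatory validate/post ops. All example_* symbols are invented and the op signatures are assumed from the calls above; see struct adf_device_ops in video/adf.h for the authoritative prototypes:

        static int example_validate(struct adf_device *dev, struct adf_post *cfg,
                        void **driver_state)
        {
                /* reject configurations the hardware cannot scan out */
                return 0;
        }

        static void example_post(struct adf_device *dev, struct adf_post *cfg,
                        void *driver_state)
        {
                /* program the hardware from cfg->bufs / cfg->custom_data */
        }

        static const struct adf_device_ops example_dev_ops = {
                .validate = example_validate,
                .post = example_post,
        };

        static struct adf_device example_adf_dev;

        static int example_probe(struct device *parent)
        {
                return adf_device_init(&example_adf_dev, parent,
                                &example_dev_ops, "example");
        }
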
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+       struct adf_attachment_list *entry, *next;
+
+       idr_destroy(&dev->interfaces);
+       idr_destroy(&dev->overlay_engines);
+
+       if (dev->post_thread) {
+               flush_kthread_worker(&dev->post_worker);
+               kthread_stop(dev->post_thread);
+       }
+
+       if (dev->onscreen)
+               adf_post_cleanup(dev, dev->onscreen);
+       adf_device_sysfs_destroy(dev);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               adf_attachment_free(entry);
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               adf_attachment_free(entry);
+       }
+       mutex_destroy(&dev->post_lock);
+       mutex_destroy(&dev->client_lock);
+
+       if (dev->timeline)
+               sync_timeline_destroy(&dev->timeline->obj);
+
+       adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ *     e.g. interface DSI.1 -> @type=%ADF_INTF_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+               enum adf_interface_type type, u32 idx, u32 flags,
+               const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+       const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+                       ADF_INTF_FLAG_EXTERNAL;
+
+       if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+               pr_err("%s: parent device %s has too many interfaces\n",
+                               __func__, dev->base.name);
+               return -ENOMEM;
+       }
+
+       if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+               pr_err("%s: invalid interface type %u\n", __func__, type);
+               return -EINVAL;
+       }
+
+       if (flags & ~allowed_flags) {
+               pr_err("%s: invalid interface flags 0x%X\n", __func__,
+                               flags & ~allowed_flags);
+               return -EINVAL;
+       }
+
+       memset(intf, 0, sizeof(*intf));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+                       dev, ops ? &ops->base : NULL, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       intf->type = type;
+       intf->idx = idx;
+       intf->flags = flags;
+       intf->ops = ops;
+       intf->dpms_state = DRM_MODE_DPMS_OFF;
+       init_waitqueue_head(&intf->vsync_wait);
+       rwlock_init(&intf->vsync_lock);
+       rwlock_init(&intf->hotplug_modelist_lock);
+
+       ret = adf_interface_sysfs_init(intf);
+       if (ret < 0)
+               goto err;
+       dev->n_interfaces++;
+
+       return 0;
+
+err:
+       adf_obj_destroy(&intf->base, &dev->interfaces);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
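
Continuing the hypothetical driver above, registering the panel as primary interface DSI.0 could look like this (NULL ops are accepted; blank/modeset support would come from a real adf_interface_ops):

        static struct adf_interface example_intf;

        static int example_register_interface(struct adf_device *dev)
        {
                return adf_interface_init(&example_intf, dev, ADF_INTF_DSI, 0,
                                ADF_INTF_FLAG_PRIMARY, NULL, "dsi.%u", 0);
        }
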
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       struct adf_attachment_list *entry, *next;
+
+       mutex_lock(&dev->client_lock);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               if (entry->attachment.interface == intf) {
+                       adf_attachment_free(entry);
+                       dev->n_attach_allowed--;
+               }
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               if (entry->attachment.interface == intf) {
+                       adf_device_detach_op(dev,
+                                       entry->attachment.overlay_engine, intf);
+                       adf_attachment_free(entry);
+                       dev->n_attached--;
+               }
+       }
+       kfree(intf->modelist);
+       adf_interface_sysfs_destroy(intf);
+       adf_obj_destroy(&intf->base, &dev->interfaces);
+       dev->n_interfaces--;
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+               const struct adf_overlay_engine_ops *ops)
+{
+       size_t i;
+       for (i = 0; i < ops->n_supported_formats; i++)
+               if (!adf_format_is_standard(ops->supported_formats[i]))
+                       return true;
+       return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+               struct adf_device *dev,
+               const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+
+       if (!ops->supported_formats) {
+               pr_err("%s: overlay engine must support at least one format\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+               pr_err("%s: overlay engine supports too many formats\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (adf_overlay_engine_has_custom_formats(ops) &&
+                       !dev->ops->validate_custom_format) {
+               pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+                               __func__, dev->base.name);
+               return -EINVAL;
+       }
+
+       memset(eng, 0, sizeof(*eng));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+                       &dev->overlay_engines, dev, &ops->base, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       eng->ops = ops;
+
+       ret = adf_overlay_engine_sysfs_init(eng);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+
+err:
+       adf_obj_destroy(&eng->base, &dev->overlay_engines);
+       return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
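
And a matching sketch for a single overlay engine that only advertises standard RGB formats, so no validate_custom_format is required on the parent device (the DRM_FORMAT_* constants come from drm_fourcc.h; the format array's element type is assumed from video/adf.h):

        static u32 example_eng_formats[] = {
                DRM_FORMAT_XRGB8888,
                DRM_FORMAT_RGB565,
        };

        static const struct adf_overlay_engine_ops example_eng_ops = {
                .supported_formats = example_eng_formats,
                .n_supported_formats = ARRAY_SIZE(example_eng_formats),
        };

        static struct adf_overlay_engine example_eng;

        static int example_register_engine(struct adf_device *dev)
        {
                return adf_overlay_engine_init(&example_eng, dev,
                                &example_eng_ops, "eng.%u", 0);
        }
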
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an
+ * overlay engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+       struct adf_device *dev = adf_overlay_engine_parent(eng);
+       struct adf_attachment_list *entry, *next;
+
+       mutex_lock(&dev->client_lock);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               if (entry->attachment.overlay_engine == eng) {
+                       adf_attachment_free(entry);
+                       dev->n_attach_allowed--;
+               }
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               if (entry->attachment.overlay_engine == eng) {
+                       adf_device_detach_op(dev, eng,
+                                       entry->attachment.interface);
+                       adf_attachment_free(entry);
+                       dev->n_attached--;
+               }
+       }
+       adf_overlay_engine_sysfs_destroy(eng);
+       adf_obj_destroy(&eng->base, &dev->overlay_engines);
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_attachment_list *entry;
+       list_for_each_entry(entry, list, head) {
+               if (entry->attachment.interface == intf &&
+                               entry->attachment.overlay_engine == eng)
+                       return entry;
+       }
+       return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_device *intf_dev = adf_interface_parent(intf);
+       struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+       if (intf_dev != dev) {
+               dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+                               intf->base.name, intf_dev->base.name);
+               return -EINVAL;
+       }
+
+       if (eng_dev != dev) {
+               dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+                               eng->base.name, eng_dev->base.name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output.  It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *entry = NULL;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+               ret = -EALREADY;
+               goto done;
+       }
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       entry->attachment.interface = intf;
+       entry->attachment.overlay_engine = eng;
+       list_add_tail(&entry->head, &dev->attach_allowed);
+       dev->n_attach_allowed++;
+
+done:
+       mutex_unlock(&dev->client_lock);
+       if (ret < 0)
+               kfree(entry);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_attachment_allow);
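
With the hypothetical interface and overlay engine from the sketches above, init-time wiring is one call per supported pair:

        static int example_allow_attachments(struct adf_device *dev)
        {
                int err = adf_attachment_allow(dev, &example_eng, &example_intf);

                return (err == -EALREADY) ? 0 : err;
        }
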
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+       switch (type) {
+       case ADF_OBJ_OVERLAY_ENGINE:
+               return "overlay engine";
+
+       case ADF_OBJ_INTERFACE:
+               return "interface";
+
+       case ADF_OBJ_DEVICE:
+               return "device";
+
+       default:
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+       switch (intf->type) {
+       case ADF_INTF_DSI:
+               return "DSI";
+
+       case ADF_INTF_eDP:
+               return "eDP";
+
+       case ADF_INTF_DPI:
+               return "DPI";
+
+       case ADF_INTF_VGA:
+               return "VGA";
+
+       case ADF_INTF_DVI:
+               return "DVI";
+
+       case ADF_INTF_HDMI:
+               return "HDMI";
+
+       case ADF_INTF_MEMORY:
+               return "memory";
+
+       default:
+               if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+                       if (intf->ops && intf->ops->type_str)
+                               return intf->ops->type_str(intf);
+                       return "custom";
+               }
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+       switch (type) {
+       case ADF_EVENT_VSYNC:
+               return "vsync";
+
+       case ADF_EVENT_HOTPLUG:
+               return "hotplug";
+
+       default:
+               if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+                       if (obj->ops && obj->ops->event_type_str)
+                               return obj->ops->event_type_str(obj, type);
+                       return "custom";
+               }
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+       buf[0] = format & 0xFF;
+       buf[1] = (format >> 8) & 0xFF;
+       buf[2] = (format >> 16) & 0xFF;
+       buf[3] = (format >> 24) & 0xFF;
+       buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+               u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+       u8 i;
+
+       if (num_planes != buf->n_planes) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(buf->format, format_str);
+               dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+                               num_planes, format_str, buf->n_planes);
+               return -EINVAL;
+       }
+
+       if (buf->w == 0 || buf->w % hsub) {
+               dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+               return -EINVAL;
+       }
+
+       if (buf->h == 0 || buf->h % vsub) {
+               dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_planes; i++) {
+               u32 width = buf->w / (i != 0 ? hsub : 1);
+               u32 height = buf->h / (i != 0 ? vsub : 1);
+               u32 last_line_size;
+
+               if (buf->pitch[i] < (u64) width * cpp[i]) {
+                       dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+                                       i, buf->pitch[i], width, cpp[i] * 8);
+                       return -EINVAL;
+               }
+
+               switch (dev->ops->quirks.buffer_padding) {
+               case ADF_BUFFER_PADDED_TO_PITCH:
+                       last_line_size = buf->pitch[i];
+                       break;
+
+               case ADF_BUFFER_UNPADDED:
+                       last_line_size = width * cpp[i];
+                       break;
+
+               default:
+                       BUG();
+               }
+
+               if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
+                               buf->offset[i] > buf->dma_bufs[i]->size) {
+                       dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+                                       i, height, buf->pitch[i],
+                                       buf->offset[i], buf->dma_bufs[i]->size);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
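
For instance, a hypothetical validate_custom_format() op for a driver-private NV12-like layout (an 8-bit Y plane plus a half-resolution interleaved CbCr plane) could delegate to the helper as follows:

        static int example_validate_custom_format(struct adf_device *dev,
                        struct adf_buffer *buf)
        {
                u8 cpp[] = { 1, 2 };    /* bytes/pixel: Y plane, then CbCr plane */

                return adf_format_validate_yuv(dev, buf, ARRAY_SIZE(cpp), 2, 2,
                                cpp);
        }
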
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)".  It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+       bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+                mode->hdisplay, mode->vdisplay,
+                interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from @mode->clock,
+ * @mode->{h,v}total, and @mode->flags.  It is intended to help drivers
+ * create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               return;
+
+       if (mode->htotal <= 0 || mode->vtotal <= 0)
+               return;
+
+       /* mode->clock is in kHz: pixels per second / htotal gives lines per
+          second; dividing by vtotal (with rounding) gives frames per second */
+       calc_val = (mode->clock * 1000);
+       calc_val /= mode->htotal;
+       refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               refresh *= 2;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               refresh /= 2;
+       if (mode->vscan > 1)
+               refresh /= mode->vscan;
+
+       mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
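
Worked example: with standard CEA-861 1080p60 timing (clock = 148500 kHz, htotal = 2200, vtotal = 1125, progressive), 148500 * 1000 / 2200 = 67500 lines per second, and (67500 + 562) / 1125 rounds down to 60, so @mode->vrefresh becomes 60.
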
+
+static int __init adf_init(void)
+{
+       int err;
+
+       err = adf_sysfs_init();
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit adf_exit(void)
+{
+       adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644 (file)
index 0000000..3bcf1fa
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+       struct rb_node node;
+       enum adf_event_type type;
+       int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+               struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+               enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+               enum adf_event_type type)
+{
+       if (!obj->ops || !obj->ops->supports_event)
+               return -EOPNOTSUPP;
+       if (!obj->ops->supports_event(obj, type))
+               return -EINVAL;
+       return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       if (!dev->ops->attach)
+               return 0;
+
+       return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       if (!dev->ops->detach)
+               return 0;
+
+       return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644 (file)
index 0000000..8061d8e
--- /dev/null
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+       return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       u8 prev_state;
+       bool disable_vsync;
+       bool enable_vsync;
+       int ret = 0;
+       struct adf_event_refcount *vsync_refcount;
+
+       if (!intf->ops || !intf->ops->blank)
+               return -EOPNOTSUPP;
+
+       if (state > DRM_MODE_DPMS_OFF)
+               return -EINVAL;
+
+       mutex_lock(&dev->client_lock);
+       if (state != DRM_MODE_DPMS_ON)
+               flush_kthread_worker(&dev->post_worker);
+       mutex_lock(&intf->base.event_lock);
+
+       vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+                       ADF_EVENT_VSYNC);
+       if (!vsync_refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       prev_state = intf->dpms_state;
+       if (prev_state == state) {
+               ret = -EBUSY;
+               goto done;
+       }
+
+       disable_vsync = vsync_active(prev_state) &&
+                       !vsync_active(state) &&
+                       vsync_refcount->refcount;
+       enable_vsync = !vsync_active(prev_state) &&
+                       vsync_active(state) &&
+                       vsync_refcount->refcount;
+
+       if (disable_vsync)
+               intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                               false);
+
+       ret = intf->ops->blank(intf, state);
+       if (ret < 0) {
+               if (disable_vsync)
+                       intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                                       true);
+               goto done;
+       }
+
+       if (enable_vsync)
+               intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                               true);
+
+       intf->dpms_state = state;
+done:
+       mutex_unlock(&intf->base.event_lock);
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
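
A sketch of suspend/resume hooks layered on this call (hypothetical helpers, not part of this patch; note that blanking into the current state returns -EBUSY):

        static int example_display_suspend(struct adf_interface *intf)
        {
                return adf_interface_blank(intf, DRM_MODE_DPMS_OFF);
        }

        static int example_display_resume(struct adf_interface *intf)
        {
                return adf_interface_blank(intf, DRM_MODE_DPMS_ON);
        }
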
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       u8 dpms_state;
+
+       mutex_lock(&dev->client_lock);
+       dpms_state = intf->dpms_state;
+       mutex_unlock(&dev->client_lock);
+
+       return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+
+       mutex_lock(&dev->client_lock);
+       memcpy(mode, &intf->current_mode, sizeof(*mode));
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       unsigned long flags;
+       size_t retval;
+
+       read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       if (modelist)
+               memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+                               min(n_modes, intf->n_modes));
+       retval = intf->n_modes;
+       read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       int ret = 0;
+
+       if (!intf->ops || !intf->ops->modeset)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&dev->client_lock);
+       flush_kthread_worker(&dev->post_worker);
+
+       ret = intf->ops->modeset(intf, mode);
+       if (ret < 0)
+               goto done;
+
+       memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
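
For example, a hypothetical client could adopt the first mode reported by the panel (sketch only; a real client would pick a mode by policy):

        static int example_use_first_mode(struct adf_interface *intf)
        {
                struct drm_mode_modeinfo mode;

                if (adf_interface_modelist(intf, &mode, 1) == 0)
                        return -ENODEV;

                return adf_interface_set_mode(intf, &mode);
        }
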
+
+/**
+ * adf_interface_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+               u16 *height_mm)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       int ret;
+
+       if (!intf->ops || !intf->ops->screen_size)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&dev->client_lock);
+       ret = intf->ops->screen_size(intf, width_mm, height_mm);
+       mutex_unlock(&dev->client_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+               u32 format)
+{
+       size_t i;
+       for (i = 0; i < eng->ops->n_supported_formats; i++)
+               if (format == eng->ops->supported_formats[i])
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+       struct adf_overlay_engine *eng = buf->overlay_engine;
+       struct device *dev = &eng->base.dev;
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+       u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+       if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(buf->format, format_str);
+               dev_err(dev, "unsupported format %s\n", format_str);
+               return -EINVAL;
+       }
+
+       if (!adf_format_is_standard(buf->format))
+               return parent->ops->validate_custom_format(parent, buf);
+
+       hsub = adf_format_horz_chroma_subsampling(buf->format);
+       vsub = adf_format_vert_chroma_subsampling(buf->format);
+       num_planes = adf_format_num_planes(buf->format);
+       for (i = 0; i < num_planes; i++)
+               cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+       return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+                       cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+               struct adf_buffer_mapping *mapping)
+{
+       int ret = 0;
+       size_t i;
+
+       for (i = 0; i < buf->n_planes; i++) {
+               struct dma_buf_attachment *attachment;
+               struct sg_table *sg_table;
+
+               attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+               if (IS_ERR(attachment)) {
+                       ret = PTR_ERR(attachment);
+                       dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
+                                       i, ret);
+                       goto done;
+               }
+               mapping->attachments[i] = attachment;
+
+               sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+               if (IS_ERR(sg_table)) {
+                       ret = PTR_ERR(sg_table);
+                       dev_err(&dev->base.dev, "mapping plane %zu failed: %d\n",
+                                       i, ret);
+                       goto done;
+               } else if (!sg_table) {
+                       ret = -ENOMEM;
+                       dev_err(&dev->base.dev, "mapping plane %zu failed\n",
+                                       i);
+                       goto done;
+               }
+               mapping->sg_tables[i] = sg_table;
+       }
+
+done:
+       if (ret < 0)
+               adf_buffer_mapping_cleanup(mapping, buf);
+
+       return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+       struct sync_pt *pt;
+       struct sync_fence *complete_fence;
+
+       if (!dev->timeline) {
+               dev->timeline = sw_sync_timeline_create(dev->base.name);
+               if (!dev->timeline)
+                       return ERR_PTR(-ENOMEM);
+               dev->timeline_max = 1;
+       }
+
+       dev->timeline_max++;
+       pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+       if (!pt)
+               goto err_pt_create;
+       complete_fence = sync_fence_create(dev->base.name, pt);
+       if (!complete_fence)
+               goto err_fence_create;
+
+       return complete_fence;
+
+err_fence_create:
+       sync_pt_free(pt);
+err_pt_create:
+       dev->timeline_max--;
+       return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack.  adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size)
+{
+       struct adf_interface **intfs_copy = NULL;
+       struct adf_buffer *bufs_copy = NULL;
+       void *custom_data_copy = NULL;
+       struct sync_fence *ret;
+       size_t i;
+
+       intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+       if (!intfs_copy)
+               return ERR_PTR(-ENOMEM);
+
+       bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+       if (!bufs_copy) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+       if (!custom_data_copy) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       for (i = 0; i < n_bufs; i++) {
+               size_t j;
+               for (j = 0; j < bufs[i].n_planes; j++)
+                       get_dma_buf(bufs[i].dma_bufs[j]);
+       }
+
+       memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+       memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+       memcpy(custom_data_copy, custom_data, custom_data_size);
+
+       ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+                       n_bufs, custom_data_copy, custom_data_size);
+       if (IS_ERR(ret))
+               goto err_post;
+
+       return ret;
+
+err_post:
+       for (i = 0; i < n_bufs; i++) {
+               size_t j;
+               for (j = 0; j < bufs[i].n_planes; j++)
+                       dma_buf_put(bufs[i].dma_bufs[j]);
+       }
+err_alloc:
+       kfree(custom_data_copy);
+       kfree(bufs_copy);
+       kfree(intfs_copy);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
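
A client-side flip sketch under several assumptions: the dma-buf is already imported, the engine/interface pair is attached, no acquire fence is needed, and all example_* names are invented:

        static int example_flip(struct adf_device *dev, struct adf_interface *intf,
                        struct adf_overlay_engine *eng, struct dma_buf *dma_buf,
                        u32 w, u32 h, u32 pitch)
        {
                struct sync_fence *release;
                struct adf_buffer buf = { 0 };

                buf.overlay_engine = eng;
                buf.w = w;
                buf.h = h;
                buf.format = DRM_FORMAT_XRGB8888;
                buf.dma_bufs[0] = dma_buf;
                buf.pitch[0] = pitch;
                buf.n_planes = 1;

                release = adf_device_post(dev, &intf, 1, &buf, 1, NULL, 0);
                if (IS_ERR(release))
                        return PTR_ERR(release);

                /* the fence signals when the buffer leaves the screen */
                sync_fence_wait(release, -1);
                sync_fence_put(release);
                return 0;
        }
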
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs,
+               void *custom_data, size_t custom_data_size)
+{
+       struct adf_pending_post *cfg;
+       struct adf_buffer_mapping *mappings;
+       struct sync_fence *ret;
+       size_t i;
+       int err;
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return ERR_PTR(-ENOMEM);
+
+       mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+       if (!mappings) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       mutex_lock(&dev->client_lock);
+
+       for (i = 0; i < n_bufs; i++) {
+               err = adf_buffer_validate(&bufs[i]);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto err_buf;
+               }
+
+               err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto err_buf;
+               }
+       }
+
+       INIT_LIST_HEAD(&cfg->head);
+       cfg->config.n_bufs = n_bufs;
+       cfg->config.bufs = bufs;
+       cfg->config.mappings = mappings;
+       cfg->config.custom_data = custom_data;
+       cfg->config.custom_data_size = custom_data_size;
+
+       err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+       if (err < 0) {
+               ret = ERR_PTR(err);
+               goto err_buf;
+       }
+
+       mutex_lock(&dev->post_lock);
+
+       if (dev->ops->complete_fence)
+               ret = dev->ops->complete_fence(dev, &cfg->config,
+                               cfg->state);
+       else
+               ret = adf_sw_complete_fence(dev);
+
+       if (IS_ERR(ret))
+               goto err_fence;
+
+       list_add_tail(&cfg->head, &dev->post_list);
+       queue_kthread_work(&dev->post_worker, &dev->post_work);
+       mutex_unlock(&dev->post_lock);
+       mutex_unlock(&dev->client_lock);
+       kfree(intfs);
+       return ret;
+
+err_fence:
+       mutex_unlock(&dev->post_lock);
+
+err_buf:
+       for (i = 0; i < n_bufs; i++)
+               adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+       mutex_unlock(&dev->client_lock);
+       kfree(mappings);
+
+err_alloc:
+       kfree(cfg);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+               struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+       struct adf_attachment_list *entry;
+       size_t i = 0;
+
+       if (!dst)
+               return;
+
+       list_for_each_entry(entry, src, head) {
+               if (i == size)
+                       return;
+               dst[i] = entry->attachment;
+               i++;
+       }
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments)
+{
+       size_t retval;
+
+       mutex_lock(&dev->client_lock);
+       adf_attachment_list_to_array(dev, &dev->attached, attachments,
+                       n_attachments);
+       retval = dev->n_attached;
+       mutex_unlock(&dev->client_lock);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments)
+{
+       size_t retval;
+
+       mutex_lock(&dev->client_lock);
+       adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+                       n_attachments);
+       retval = dev->n_attach_allowed;
+       mutex_unlock(&dev->client_lock);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       struct adf_attachment_list *attachment;
+
+       mutex_lock(&dev->client_lock);
+       attachment = adf_attachment_find(&dev->attached, eng, intf);
+       mutex_unlock(&dev->client_lock);
+
+       return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_attachment_list *attachment;
+
+       mutex_lock(&dev->client_lock);
+       attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+       mutex_unlock(&dev->client_lock);
+
+       return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *attachment = NULL;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       if (adf_attachment_find(&dev->attached, eng, intf)) {
+               ret = -EALREADY;
+               goto done;
+       }
+
+       ret = adf_device_attach_op(dev, eng, intf);
+       if (ret < 0)
+               goto done;
+
+       attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+       if (!attachment) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       attachment->attachment.interface = intf;
+       attachment->attachment.overlay_engine = eng;
+       list_add_tail(&attachment->head, &dev->attached);
+       dev->n_attached++;
+
+done:
+       mutex_unlock(&dev->client_lock);
+       if (ret < 0)
+               kfree(attachment);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
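+
+/*
+ * Usage sketch (hypothetical client code): attach an overlay engine to an
+ * interface, treating an existing attachment as success -- the same policy
+ * the fbdev helper in adf_fbdev.c applies on open.  The my_* name is a
+ * placeholder.
+ */
+static inline int my_ensure_attached(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       int ret = adf_device_attach(dev, eng, intf);
+
+       return (ret == -EALREADY) ? 0 : ret;
+}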
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *attachment;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       attachment = adf_attachment_find(&dev->attached, eng, intf);
+       if (!attachment) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = adf_device_detach_op(dev, eng, intf);
+       if (ret < 0)
+               goto done;
+
+       adf_attachment_free(attachment);
+       dev->n_attached--;
+done:
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+               u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+       if (!intf->ops || !intf->ops->alloc_simple_buffer)
+               return -EOPNOTSUPP;
+
+       if (!adf_format_is_rgb(format))
+               return -EINVAL;
+
+       return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+                       offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+               struct adf_buffer *buf)
+{
+       size_t custom_data_size = 0;
+       void *custom_data = NULL;
+       struct sync_fence *ret;
+
+       if (intf->ops && intf->ops->describe_simple_post) {
+               int err;
+
+               custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+               if (!custom_data) {
+                       ret = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+
+               err = intf->ops->describe_simple_post(intf, buf, custom_data,
+                               &custom_data_size);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto done;
+               }
+       }
+
+       ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+                       custom_data, custom_data_size);
+done:
+       kfree(custom_data);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
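+
+/*
+ * Usage sketch (hypothetical driver/client code) combining the two helpers
+ * above: allocate a simple buffer, describe it with a struct adf_buffer the
+ * same way adf_fbdev_post() in adf_fbdev.c does, and flip it to the screen.
+ * The 1280x720 RGB565 values are arbitrary examples; the caller keeps the
+ * dma_buf reference (returned via @out) while the buffer may be scanned out.
+ */
+static int my_post_test_buffer(struct adf_interface *intf,
+               struct adf_overlay_engine *eng, struct dma_buf **out)
+{
+       struct adf_buffer buf;
+       struct sync_fence *fence;
+       u32 offset, pitch;
+       int ret;
+
+       ret = adf_interface_simple_buffer_alloc(intf, 1280, 720,
+                       DRM_FORMAT_RGB565, out, &offset, &pitch);
+       if (ret < 0)
+               return ret;
+
+       /* ... fill the buffer, e.g. via dma_buf_vmap()/dma_buf_vunmap() ... */
+
+       memset(&buf, 0, sizeof(buf));
+       buf.overlay_engine = eng;
+       buf.w = 1280;
+       buf.h = 720;
+       buf.format = DRM_FORMAT_RGB565;
+       buf.dma_bufs[0] = *out;
+       buf.offset[0] = offset;
+       buf.pitch[0] = pitch;
+       buf.n_planes = 1;
+
+       fence = adf_interface_simple_post(intf, &buf);
+       if (IS_ERR(fence)) {
+               dma_buf_put(*out);
+               *out = NULL;
+               return PTR_ERR(fence);
+       }
+
+       /* the fence signals when this buffer is later removed from the screen */
+       sync_fence_put(fence);
+       return 0;
+}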
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644
index 0000000..a5b53bc
--- /dev/null
@@ -0,0 +1,665 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+       u32 fourcc;
+       u32 bpp;
+       u32 r_length;
+       u32 g_length;
+       u32 b_length;
+       u32 a_length;
+       u32 r_offset;
+       u32 g_offset;
+       u32 b_offset;
+       u32 a_offset;
+};
+
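+/* each entry: {fourcc, bpp, r/g/b/a bit lengths, r/g/b/a bit offsets} */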
+static const struct adf_fbdev_format format_table[] = {
+       {DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+       {DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},
+
+       {DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+       {DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+       {DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+       {DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+
+       {DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+       {DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+       {DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+       {DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},
+
+       {DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+       {DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+       {DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+       {DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+       {DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+       {DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+       {DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+       {DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+       {DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+       {DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+       {DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+       {DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+       {DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+       {DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+       {DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+       {DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+       {DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+       {DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+       {DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+       {DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+       {DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+       {DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+       {DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+       {DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+       {DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+       {DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+       {DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+       {DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+               const struct adf_fbdev_format *f = &format_table[i];
+               if (var->red.length == f->r_length &&
+                       var->red.offset == f->r_offset &&
+                       var->green.length == f->g_length &&
+                       var->green.offset == f->g_offset &&
+                       var->blue.length == f->b_length &&
+                       var->blue.offset == f->b_offset &&
+                       var->transp.length == f->a_length &&
+                       (var->transp.length == 0 ||
+                                       var->transp.offset == f->a_offset))
+                       return f->fourcc;
+       }
+
+       return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+               const struct adf_fbdev_format *f = &format_table[i];
+               if (f->fourcc == format)
+                       return f;
+       }
+
+       BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode)
+{
+       memset(vmode, 0, sizeof(*vmode));
+
+       vmode->refresh = mode->vrefresh;
+
+       vmode->xres = mode->hdisplay;
+       vmode->yres = mode->vdisplay;
+
+       vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+       vmode->left_margin = mode->htotal - mode->hsync_end;
+       vmode->right_margin = mode->hsync_start - mode->hdisplay;
+       vmode->upper_margin = mode->vtotal - mode->vsync_end;
+       vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+       vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+       vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+       vmode->sync = 0;
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+               vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_BCAST)
+               vmode->sync |= FB_SYNC_BROADCAST;
+
+       vmode->vmode = 0;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               vmode->vmode |= FB_VMODE_INTERLACED;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode)
+{
+       memset(mode, 0, sizeof(*mode));
+
+       mode->hdisplay = vmode->xres;
+       mode->hsync_start = mode->hdisplay + vmode->right_margin;
+       mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+       mode->htotal = mode->hsync_end + vmode->left_margin;
+
+       mode->vdisplay = vmode->yres;
+       mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+       mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+       mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+       mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+       mode->flags = 0;
+       if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+       if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PCSYNC;
+       if (vmode->sync & FB_SYNC_BROADCAST)
+               mode->flags |= DRM_MODE_FLAG_BCAST;
+       if (vmode->vmode & FB_VMODE_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (vmode->vmode & FB_VMODE_DOUBLE)
+               mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+       if (vmode->refresh)
+               mode->vrefresh = vmode->refresh;
+       else
+               adf_modeinfo_set_vrefresh(mode);
+
+       if (vmode->name)
+               strlcpy(mode->name, vmode->name, sizeof(mode->name));
+       else
+               adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+       struct adf_buffer buf;
+       struct sync_fence *complete_fence;
+       int ret = 0;
+
+       memset(&buf, 0, sizeof(buf));
+       buf.overlay_engine = fbdev->eng;
+       buf.w = fbdev->info->var.xres;
+       buf.h = fbdev->info->var.yres;
+       buf.format = fbdev->format;
+       buf.dma_bufs[0] = fbdev->dma_buf;
+       buf.offset[0] = fbdev->offset +
+                       fbdev->info->var.yoffset * fbdev->pitch +
+                       fbdev->info->var.xoffset *
+                       (fbdev->info->var.bits_per_pixel / 8);
+       buf.pitch[0] = fbdev->pitch;
+       buf.n_planes = 1;
+
+       complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto done;
+       }
+
+       sync_fence_put(complete_fence);
+done:
+       return ret;
+}
+
+static const u16 vga_palette[][3] = {
+       {0x0000, 0x0000, 0x0000},
+       {0x0000, 0x0000, 0xAAAA},
+       {0x0000, 0xAAAA, 0x0000},
+       {0x0000, 0xAAAA, 0xAAAA},
+       {0xAAAA, 0x0000, 0x0000},
+       {0xAAAA, 0x0000, 0xAAAA},
+       {0xAAAA, 0x5555, 0x0000},
+       {0xAAAA, 0xAAAA, 0xAAAA},
+       {0x5555, 0x5555, 0x5555},
+       {0x5555, 0x5555, 0xFFFF},
+       {0x5555, 0xFFFF, 0x5555},
+       {0x5555, 0xFFFF, 0xFFFF},
+       {0xFFFF, 0x5555, 0x5555},
+       {0xFFFF, 0x5555, 0xFFFF},
+       {0xFFFF, 0xFFFF, 0x5555},
+       {0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+       int ret;
+
+       ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+                       fbdev->default_xres_virtual,
+                       fbdev->default_yres_virtual,
+                       fbdev->default_format,
+                       &fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+       if (ret < 0) {
+               dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+               return ret;
+       }
+
+       fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+       if (!fbdev->vaddr) {
+               ret = -ENOMEM;
+               dev_err(fbdev->info->dev, "vmapping fb failed\n");
+               goto err_vmap;
+       }
+       fbdev->info->fix.line_length = fbdev->pitch;
+       fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+       fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+       fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+       fbdev->info->screen_base = fbdev->vaddr;
+
+       return 0;
+
+err_vmap:
+       dma_buf_put(fbdev->dma_buf);
+       return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+       dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+       dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+       size_t i;
+       const struct adf_fbdev_format *info = fbdev_format_info(format);
+       for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+               u16 r = vga_palette[i][0];
+               u16 g = vga_palette[i][1];
+               u16 b = vga_palette[i][2];
+
+               r >>= (16 - info->r_length);
+               g >>= (16 - info->g_length);
+               b >>= (16 - info->b_length);
+
+               fbdev->pseudo_palette[i] =
+                       (r << info->r_offset) |
+                       (g << info->g_offset) |
+                       (b << info->b_offset);
+
+               if (info->a_length) {
+                       u16 a = BIT(info->a_length) - 1;
+                       fbdev->pseudo_palette[i] |= (a << info->a_offset);
+               }
+       }
+
+       fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+       fbdev->info->var.red.length = info->r_length;
+       fbdev->info->var.red.offset = info->r_offset;
+       fbdev->info->var.green.length = info->g_length;
+       fbdev->info->var.green.offset = info->g_offset;
+       fbdev->info->var.blue.length = info->b_length;
+       fbdev->info->var.blue.offset = info->b_offset;
+       fbdev->info->var.transp.length = info->a_length;
+       fbdev->info->var.transp.offset = info->a_offset;
+       fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+       struct drm_mode_modeinfo *modelist;
+       struct fb_videomode fbmode;
+       size_t n_modes, i;
+       int ret = 0;
+
+       n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+       modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+       if (!modelist) {
+               dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+               return;
+       }
+       adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+       fb_destroy_modelist(&fbdev->info->modelist);
+
+       for (i = 0; i < n_modes; i++) {
+               adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+               ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+               if (ret < 0)
+                       dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+                                       modelist[i].name, ret);
+       }
+
+       kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+       struct adf_fbdev *fbdev = info->par;
+       int ret;
+
+       mutex_lock(&fbdev->refcount_lock);
+
+       if (unlikely(fbdev->refcount == UINT_MAX)) {
+               ret = -EMFILE;
+               goto done;
+       }
+
+       if (!fbdev->refcount) {
+               struct drm_mode_modeinfo mode;
+               struct fb_videomode fbmode;
+               struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+               ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+               if (ret < 0 && ret != -EALREADY)
+                       goto done;
+
+               ret = adf_fb_alloc(fbdev);
+               if (ret < 0)
+                       goto done;
+
+               adf_interface_current_mode(fbdev->intf, &mode);
+               adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+               fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+               adf_fbdev_set_format(fbdev, fbdev->default_format);
+               adf_fbdev_fill_modelist(fbdev);
+       }
+
+       ret = adf_fbdev_post(fbdev);
+       if (ret < 0) {
+               if (!fbdev->refcount)
+                       adf_fb_destroy(fbdev);
+               goto done;
+       }
+
+       fbdev->refcount++;
+done:
+       mutex_unlock(&fbdev->refcount_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+       struct adf_fbdev *fbdev = info->par;
+       mutex_lock(&fbdev->refcount_lock);
+       BUG_ON(!fbdev->refcount);
+       fbdev->refcount--;
+       if (!fbdev->refcount)
+               adf_fb_destroy(fbdev);
+       mutex_unlock(&fbdev->refcount_lock);
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       bool valid_format = true;
+       u32 format = drm_fourcc_from_fb_var(var);
+       u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+       if (!format) {
+               dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && var->grayscale) {
+               dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+                               __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && var->nonstd) {
+               dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+                               __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+                       format)) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(format, format_str);
+               dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+                               __func__, format_str, fbdev->eng->base.name);
+               valid_format = false;
+       }
+
+       if (valid_format && pitch > fbdev->pitch) {
+               dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+                               __func__, fbdev->pitch, var->xres_virtual,
+                               var->bits_per_pixel);
+               valid_format = false;
+       }
+
+       if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+               dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+                               __func__, fbdev->default_yres_virtual,
+                               var->yres_virtual);
+               valid_format = false;
+       }
+
+       if (valid_format) {
+               var->activate = info->var.activate;
+               var->height = info->var.height;
+               var->width = info->var.width;
+               var->accel_flags = info->var.accel_flags;
+               var->rotate = info->var.rotate;
+               var->colorspace = info->var.colorspace;
+               /* userspace can't change these */
+       } else {
+               /* if any part of the format is invalid then fixing it up is
+                  impractical, so save just the modesetting bits and
+                  overwrite everything else */
+               struct fb_videomode mode;
+               fb_var_to_videomode(&mode, var);
+               memcpy(var, &info->var, sizeof(*var));
+               fb_videomode_to_var(var, &mode);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       struct adf_interface *intf = fbdev->intf;
+       struct fb_videomode vmode;
+       struct drm_mode_modeinfo mode;
+       int ret;
+       u32 format = drm_fourcc_from_fb_var(&info->var);
+
+       fb_var_to_videomode(&vmode, &info->var);
+       adf_modeinfo_from_fb_videomode(&vmode, &mode);
+       ret = adf_interface_set_mode(intf, &mode);
+       if (ret < 0)
+               return ret;
+
+       ret = adf_fbdev_post(fbdev);
+       if (ret < 0)
+               return ret;
+
+       if (format != fbdev->format)
+               adf_fbdev_set_format(fbdev, format);
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       struct adf_interface *intf = fbdev->intf;
+       u8 dpms_state;
+
+       switch (blank) {
+       case FB_BLANK_UNBLANK:
+               dpms_state = DRM_MODE_DPMS_ON;
+               break;
+       case FB_BLANK_NORMAL:
+               dpms_state = DRM_MODE_DPMS_STANDBY;
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               dpms_state = DRM_MODE_DPMS_SUSPEND;
+               break;
+       case FB_BLANK_HSYNC_SUSPEND:
+               dpms_state = DRM_MODE_DPMS_STANDBY;
+               break;
+       case FB_BLANK_POWERDOWN:
+               dpms_state = DRM_MODE_DPMS_OFF;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct adf_fbdev *fbdev = info->par;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...)
+{
+       struct adf_device *parent = adf_interface_parent(interface);
+       struct device *dev = &parent->base.dev;
+       u16 width_mm, height_mm;
+       va_list args;
+       int ret;
+
+       if (!adf_format_is_rgb(format) ||
+                       format == DRM_FORMAT_C8) {
+               dev_err(dev, "fbdev helper does not support format %u\n",
+                               format);
+               return -EINVAL;
+       }
+
+       memset(fbdev, 0, sizeof(*fbdev));
+       fbdev->intf = interface;
+       fbdev->eng = eng;
+       fbdev->info = framebuffer_alloc(0, dev);
+       if (!fbdev->info) {
+               dev_err(dev, "allocating framebuffer device failed\n");
+               return -ENOMEM;
+       }
+       mutex_init(&fbdev->refcount_lock);
+       fbdev->default_xres_virtual = xres_virtual;
+       fbdev->default_yres_virtual = yres_virtual;
+       fbdev->default_format = format;
+
+       fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+       ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+       if (ret < 0) {
+               width_mm = 0;
+               height_mm = 0;
+       }
+       fbdev->info->var.width = width_mm;
+       fbdev->info->var.height = height_mm;
+       fbdev->info->var.activate = FB_ACTIVATE_VBL;
+       va_start(args, fmt);
+       vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+       va_end(args);
+       fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+       fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+       fbdev->info->fix.xpanstep = 1;
+       fbdev->info->fix.ypanstep = 1;
+       INIT_LIST_HEAD(&fbdev->info->modelist);
+       fbdev->info->fbops = fbops;
+       fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+       fbdev->info->par = fbdev;
+
+       ret = register_framebuffer(fbdev->info);
+       if (ret < 0) {
+               dev_err(dev, "registering framebuffer failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
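+
+/*
+ * Usage sketch (hypothetical driver code): wire the default fbdev ops above
+ * into a struct fb_ops and register an fbdev for one interface/overlay
+ * engine pair.  The sys_* drawing helpers are used on the assumption that
+ * they are built in and suit the vmapped framebuffer; the my_* names,
+ * 1280x720 RGB565 and the "my-adf-fb" id are example values.
+ */
+static struct fb_ops my_adf_fb_ops = {
+       .owner = THIS_MODULE,
+       .fb_open = adf_fbdev_open,
+       .fb_release = adf_fbdev_release,
+       .fb_check_var = adf_fbdev_check_var,
+       .fb_set_par = adf_fbdev_set_par,
+       .fb_blank = adf_fbdev_blank,
+       .fb_pan_display = adf_fbdev_pan_display,
+       .fb_fillrect = sys_fillrect,
+       .fb_copyarea = sys_copyarea,
+       .fb_imageblit = sys_imageblit,
+       .fb_mmap = adf_fbdev_mmap,
+};
+
+static inline int my_register_fbdev(struct adf_fbdev *fbdev,
+               struct adf_interface *intf, struct adf_overlay_engine *eng)
+{
+       return adf_fbdev_init(fbdev, intf, eng, 1280, 720,
+                       DRM_FORMAT_RGB565, &my_adf_fb_ops, "my-adf-fb");
+}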
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+       unregister_framebuffer(fbdev->info);
+       BUG_ON(fbdev->refcount);
+       mutex_destroy(&fbdev->refcount_lock);
+       framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644
index 0000000..705411b
--- /dev/null
@@ -0,0 +1,946 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+               struct adf_set_event __user *arg)
+{
+       struct adf_set_event data;
+       bool enabled;
+       unsigned long flags;
+       int err;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       err = adf_obj_check_supports_event(obj, data.type);
+       if (err < 0)
+               return err;
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       if (data.enabled)
+               enabled = test_and_set_bit(data.type,
+                               file->event_subscriptions);
+       else
+               enabled = test_and_clear_bit(data.type,
+                               file->event_subscriptions);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+       if (data.enabled == enabled)
+               return -EALREADY;
+
+       if (data.enabled)
+               adf_event_get(obj, data.type);
+       else
+               adf_event_put(obj, data.type);
+
+       return 0;
+}
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+               void __user *dst, size_t *dst_size)
+{
+       void *custom_data;
+       size_t custom_data_size;
+       int ret;
+
+       if (!obj->ops || !obj->ops->custom_data) {
+               dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+               return 0;
+       }
+
+       custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+       if (!custom_data)
+               return -ENOMEM;
+
+       ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+       if (ret < 0)
+               goto done;
+
+       if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+               ret = -EFAULT;
+               goto done;
+       }
+       *dst_size = custom_data_size;
+
+done:
+       kfree(custom_data);
+       return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+               struct adf_overlay_engine_data __user *arg)
+{
+       struct adf_device *dev = adf_overlay_engine_parent(eng);
+       struct adf_overlay_engine_data data;
+       size_t n_supported_formats;
+       u32 *supported_formats = NULL;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+       if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+               return -EINVAL;
+
+       n_supported_formats = data.n_supported_formats;
+       data.n_supported_formats = eng->ops->n_supported_formats;
+
+       if (n_supported_formats) {
+               supported_formats = kzalloc(n_supported_formats *
+                               sizeof(supported_formats[0]), GFP_KERNEL);
+               if (!supported_formats)
+                       return -ENOMEM;
+       }
+
+       memcpy(supported_formats, eng->ops->supported_formats,
+                       sizeof(u32) * min(n_supported_formats,
+                                       eng->ops->n_supported_formats));
+
+       mutex_lock(&dev->client_lock);
+       ret = adf_obj_copy_custom_data_to_user(&eng->base, data.custom_data,
+                       &data.custom_data_size);
+       mutex_unlock(&dev->client_lock);
+
+       if (ret < 0)
+               goto done;
+
+       if (copy_to_user(arg, &data, sizeof(data))) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       if (supported_formats && copy_to_user(data.supported_formats,
+                       supported_formats,
+                       n_supported_formats * sizeof(supported_formats[0])))
+               ret = -EFAULT;
+
+done:
+       kfree(supported_formats);
+       return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+               struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+       struct adf_buffer_config user_buf;
+       size_t i;
+       int ret = 0;
+
+       if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+               return -EFAULT;
+
+       memset(buf, 0, sizeof(*buf));
+
+       if (user_buf.n_planes > ADF_MAX_PLANES) {
+               dev_err(&dev->base.dev, "invalid plane count %u\n",
+                               user_buf.n_planes);
+               return -EINVAL;
+       }
+
+       buf->overlay_engine = idr_find(&dev->overlay_engines,
+                       user_buf.overlay_engine);
+       if (!buf->overlay_engine) {
+               dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+                               user_buf.overlay_engine);
+               return -ENOENT;
+       }
+
+       buf->w = user_buf.w;
+       buf->h = user_buf.h;
+       buf->format = user_buf.format;
+       for (i = 0; i < user_buf.n_planes; i++) {
+               buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+               if (IS_ERR(buf->dma_bufs[i])) {
+                       ret = PTR_ERR(buf->dma_bufs[i]);
+                       dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
+                                       user_buf.fd[i], ret);
+                       buf->dma_bufs[i] = NULL;
+                       goto done;
+               }
+               buf->offset[i] = user_buf.offset[i];
+               buf->pitch[i] = user_buf.pitch[i];
+       }
+       buf->n_planes = user_buf.n_planes;
+
+       if (user_buf.acquire_fence >= 0) {
+               buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+               if (!buf->acquire_fence) {
+                       dev_err(&dev->base.dev, "getting fence fd %d failed\n",
+                                       user_buf.acquire_fence);
+                       ret = -EINVAL;
+                       goto done;
+               }
+       }
+
+done:
+       if (ret < 0)
+               adf_buffer_cleanup(buf);
+       return ret;
+}
+
+static int adf_device_post_config(struct adf_device *dev,
+               struct adf_post_config __user *arg)
+{
+       struct sync_fence *complete_fence;
+       int complete_fence_fd;
+       struct adf_buffer *bufs = NULL;
+       struct adf_interface **intfs = NULL;
+       struct adf_post_config data;
+       size_t i;
+       void *custom_data = NULL;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+       if (complete_fence_fd < 0)
+               return complete_fence_fd;
+
+       if (data.n_interfaces > ADF_MAX_INTERFACES) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (data.n_bufs > ADF_MAX_BUFFERS) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (data.custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (data.n_interfaces) {
+               intfs = kmalloc(sizeof(intfs[0]) * data.n_interfaces,
+                       GFP_KERNEL);
+               if (!intfs) {
+                       ret = -ENOMEM;
+                       goto err_get_user;
+               }
+       }
+
+       for (i = 0; i < data.n_interfaces; i++) {
+               u32 intf_id;
+               if (get_user(intf_id, &data.interfaces[i])) {
+                       ret = -EFAULT;
+                       goto err_get_user;
+               }
+
+               intfs[i] = idr_find(&dev->interfaces, intf_id);
+               if (!intfs[i]) {
+                       ret = -EINVAL;
+                       goto err_get_user;
+               }
+       }
+
+       if (data.n_bufs) {
+               bufs = kzalloc(sizeof(bufs[0]) * data.n_bufs, GFP_KERNEL);
+               if (!bufs) {
+                       ret = -ENOMEM;
+                       goto err_get_user;
+               }
+       }
+
+       for (i = 0; i < data.n_bufs; i++) {
+               ret = adf_buffer_import(dev, &data.bufs[i], &bufs[i]);
+               if (ret < 0) {
+                       memset(&bufs[i], 0, sizeof(bufs[i]));
+                       goto err_import;
+               }
+       }
+
+       if (data.custom_data_size) {
+               custom_data = kzalloc(data.custom_data_size, GFP_KERNEL);
+               if (!custom_data) {
+                       ret = -ENOMEM;
+                       goto err_import;
+               }
+
+               if (copy_from_user(custom_data, data.custom_data,
+                               data.custom_data_size)) {
+                       ret = -EFAULT;
+                       goto err_import;
+               }
+       }
+
+       if (put_user(complete_fence_fd, &arg->complete_fence)) {
+               ret = -EFAULT;
+               goto err_import;
+       }
+
+       complete_fence = adf_device_post_nocopy(dev, intfs, data.n_interfaces,
+                       bufs, data.n_bufs, custom_data, data.custom_data_size);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto err_import;
+       }
+
+       sync_fence_install(complete_fence, complete_fence_fd);
+       return 0;
+
+err_import:
+       for (i = 0; i < data.n_bufs; i++)
+               adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+       kfree(custom_data);
+       kfree(bufs);
+       kfree(intfs);
+       put_unused_fd(complete_fence_fd);
+       return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+               struct adf_simple_post_config __user *arg)
+{
+       struct adf_device *dev = intf->base.parent;
+       struct sync_fence *complete_fence;
+       int complete_fence_fd;
+       struct adf_buffer buf;
+       int ret = 0;
+
+       complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+       if (complete_fence_fd < 0)
+               return complete_fence_fd;
+
+       ret = adf_buffer_import(dev, &arg->buf, &buf);
+       if (ret < 0)
+               goto err_import;
+
+       if (put_user(complete_fence_fd, &arg->complete_fence)) {
+               ret = -EFAULT;
+               goto err_put_user;
+       }
+
+       complete_fence = adf_interface_simple_post(intf, &buf);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto err_put_user;
+       }
+
+       sync_fence_install(complete_fence, complete_fence_fd);
+       return 0;
+
+err_put_user:
+       adf_buffer_cleanup(&buf);
+err_import:
+       put_unused_fd(complete_fence_fd);
+       return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+               struct adf_simple_buffer_alloc __user *arg)
+{
+       struct adf_simple_buffer_alloc data;
+       struct dma_buf *dma_buf;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       data.fd = get_unused_fd_flags(O_CLOEXEC);
+       if (data.fd < 0)
+               return data.fd;
+
+       ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+                       data.format, &dma_buf, &data.offset, &data.pitch);
+       if (ret < 0)
+               goto err_alloc;
+
+       if (copy_to_user(arg, &data, sizeof(*arg))) {
+               ret = -EFAULT;
+               goto err_copy;
+       }
+
+       fd_install(data.fd, dma_buf->file);
+       return 0;
+
+err_copy:
+       dma_buf_put(dma_buf);
+
+err_alloc:
+       put_unused_fd(data.fd);
+       return ret;
+}
+
+static int adf_copy_attachment_list_to_user(
+               struct adf_attachment_config __user *to, size_t n_to,
+               struct adf_attachment *from, size_t n_from)
+{
+       struct adf_attachment_config *temp;
+       size_t n = min(n_to, n_from);
+       size_t i;
+       int ret = 0;
+
+       if (!n)
+               return 0;
+
+       temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+       if (!temp)
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++) {
+               temp[i].interface = from[i].interface->base.id;
+               temp[i].overlay_engine = from[i].overlay_engine->base.id;
+       }
+
+       if (copy_to_user(to, temp, n * sizeof(to[0]))) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+done:
+       kfree(temp);
+       return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+               struct adf_device_data __user *arg)
+{
+       struct adf_device_data data;
+       size_t n_attach;
+       struct adf_attachment *attach = NULL;
+       size_t n_allowed_attach;
+       struct adf_attachment *allowed_attach = NULL;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+                       data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+               return -EINVAL;
+
+       strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+       if (data.n_attachments) {
+               attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+                               GFP_KERNEL);
+               if (!attach)
+                       return -ENOMEM;
+       }
+       n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+       if (data.n_allowed_attachments) {
+               allowed_attach = kzalloc(data.n_allowed_attachments *
+                               sizeof(allowed_attach[0]), GFP_KERNEL);
+               if (!allowed_attach) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+       }
+       n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+                       data.n_allowed_attachments);
+
+       mutex_lock(&dev->client_lock);
+       ret = adf_obj_copy_custom_data_to_user(&dev->base, data.custom_data,
+                       &data.custom_data_size);
+       mutex_unlock(&dev->client_lock);
+
+       if (ret < 0)
+               goto done;
+
+       ret = adf_copy_attachment_list_to_user(data.attachments,
+                       data.n_attachments, attach, n_attach);
+       if (ret < 0)
+               goto done;
+
+       ret = adf_copy_attachment_list_to_user(data.allowed_attachments,
+                       data.n_allowed_attachments, allowed_attach,
+                       n_allowed_attach);
+       if (ret < 0)
+               goto done;
+
+       data.n_attachments = n_attach;
+       data.n_allowed_attachments = n_allowed_attach;
+
+       if (copy_to_user(arg, &data, sizeof(data)))
+               ret = -EFAULT;
+
+done:
+       kfree(allowed_attach);
+       kfree(attach);
+       return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+               struct adf_attachment_config __user *arg, bool attach)
+{
+       struct adf_attachment_config data;
+       struct adf_overlay_engine *eng;
+       struct adf_interface *intf;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+       if (!eng) {
+               dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+                               data.overlay_engine);
+               return -EINVAL;
+       }
+
+       intf = idr_find(&dev->interfaces, data.interface);
+       if (!intf) {
+               dev_err(&dev->base.dev, "invalid interface id %u\n",
+                               data.interface);
+               return -EINVAL;
+       }
+
+       if (attach)
+               return adf_device_attach(dev, eng, intf);
+       else
+               return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo __user *arg)
+{
+       struct drm_mode_modeinfo mode;
+
+       if (copy_from_user(&mode, arg, sizeof(mode)))
+               return -EFAULT;
+
+       return adf_interface_set_mode(intf, &mode);
+}
+
+static int adf_intf_get_data(struct adf_interface *intf,
+               struct adf_interface_data __user *arg)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       struct adf_interface_data data;
+       struct drm_mode_modeinfo *modelist;
+       size_t modelist_size;
+       int err;
+       int ret = 0;
+       unsigned long flags;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+       data.type = intf->type;
+       data.id = intf->idx;
+       data.flags = intf->flags;
+
+       err = adf_interface_get_screen_size(intf, &data.width_mm,
+                       &data.height_mm);
+       if (err < 0) {
+               data.width_mm = 0;
+               data.height_mm = 0;
+       }
+
+       modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+       if (!modelist)
+               return -ENOMEM;
+
+       mutex_lock(&dev->client_lock);
+       read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       data.hotplug_detect = intf->hotplug_detect;
+       modelist_size = min(data.n_available_modes, intf->n_modes) *
+                       sizeof(intf->modelist[0]);
+       memcpy(modelist, intf->modelist, modelist_size);
+       data.n_available_modes = intf->n_modes;
+       read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       if (copy_to_user(data.available_modes, modelist, modelist_size)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       data.dpms_state = intf->dpms_state;
+       memcpy(&data.current_mode, &intf->current_mode,
+                       sizeof(intf->current_mode));
+
+       ret = adf_obj_copy_custom_data_to_user(&intf->base, data.custom_data,
+                       &data.custom_data_size);
+done:
+       mutex_unlock(&dev->client_lock);
+       kfree(modelist);
+
+       if (ret < 0)
+               return ret;
+
+       if (copy_to_user(arg, &data, sizeof(data)))
+               ret = -EFAULT;
+
+       return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+               unsigned long arg)
+{
+       if (obj->ops && obj->ops->ioctl)
+               return obj->ops->ioctl(obj, cmd, arg);
+       return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+               struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&eng->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+               return adf_eng_get_data(eng,
+                       (struct adf_overlay_engine_data __user *)arg);
+
+       case ADF_BLANK:
+       case ADF_POST_CONFIG:
+       case ADF_SET_MODE:
+       case ADF_GET_DEVICE_DATA:
+       case ADF_GET_INTERFACE_DATA:
+       case ADF_SIMPLE_POST_CONFIG:
+       case ADF_SIMPLE_BUFFER_ALLOC:
+       case ADF_ATTACH:
+       case ADF_DETACH:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+       }
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+               struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&intf->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_BLANK:
+               return adf_interface_blank(intf, arg);
+
+       case ADF_SET_MODE:
+               return adf_intf_set_mode(intf,
+                               (struct drm_mode_modeinfo __user *)arg);
+
+       case ADF_GET_INTERFACE_DATA:
+               return adf_intf_get_data(intf,
+                               (struct adf_interface_data __user *)arg);
+
+       case ADF_SIMPLE_POST_CONFIG:
+               return adf_intf_simple_post_config(intf,
+                               (struct adf_simple_post_config __user *)arg);
+
+       case ADF_SIMPLE_BUFFER_ALLOC:
+               return adf_intf_simple_buffer_alloc(intf,
+                               (struct adf_simple_buffer_alloc __user *)arg);
+
+       case ADF_POST_CONFIG:
+       case ADF_GET_DEVICE_DATA:
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+       case ADF_ATTACH:
+       case ADF_DETACH:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+       }
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+               unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&dev->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_POST_CONFIG:
+               return adf_device_post_config(dev,
+                               (struct adf_post_config __user *)arg);
+
+       case ADF_GET_DEVICE_DATA:
+               return adf_device_get_data(dev,
+                               (struct adf_device_data __user *)arg);
+
+       case ADF_ATTACH:
+               return adf_device_handle_attachment(dev,
+                               (struct adf_attachment_config __user *)arg,
+                               true);
+
+       case ADF_DETACH:
+               return adf_device_handle_attachment(dev,
+                               (struct adf_attachment_config __user *)arg,
+                               false);
+
+       case ADF_BLANK:
+       case ADF_SET_MODE:
+       case ADF_GET_INTERFACE_DATA:
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+       case ADF_SIMPLE_POST_CONFIG:
+       case ADF_SIMPLE_BUFFER_ALLOC:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+       }
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+       struct adf_obj *obj;
+       struct adf_file *fpriv = NULL;
+       unsigned long flags;
+       int ret = 0;
+
+       obj = adf_obj_sysfs_find(iminor(inode));
+       if (!obj)
+               return -ENODEV;
+
+       dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+       if (!try_module_get(obj->parent->ops->owner)) {
+               dev_err(&obj->dev, "getting owner module failed\n");
+               return -ENODEV;
+       }
+
+       fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+       if (!fpriv) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       INIT_LIST_HEAD(&fpriv->head);
+       fpriv->obj = obj;
+       init_waitqueue_head(&fpriv->event_wait);
+
+       file->private_data = fpriv;
+
+       if (obj->ops && obj->ops->open) {
+               ret = obj->ops->open(obj, inode, file);
+               if (ret < 0)
+                       goto done;
+       }
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       list_add_tail(&fpriv->head, &obj->file_list);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+       if (ret < 0) {
+               kfree(fpriv);
+               module_put(obj->parent->ops->owner);
+       }
+       return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+       struct adf_file *fpriv = file->private_data;
+       struct adf_obj *obj = fpriv->obj;
+       enum adf_event_type event_type;
+       unsigned long flags;
+
+       if (obj->ops && obj->ops->release)
+               obj->ops->release(obj, inode, file);
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       list_del(&fpriv->head);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+       for_each_set_bit(event_type, fpriv->event_subscriptions,
+                       ADF_EVENT_TYPE_MAX) {
+               adf_event_put(obj, event_type);
+       }
+
+       kfree(fpriv);
+       module_put(obj->parent->ops->owner);
+
+       dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+       return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct adf_file *fpriv = file->private_data;
+       struct adf_obj *obj = fpriv->obj;
+       long ret = -EINVAL;
+
+       dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+       switch (obj->type) {
+       case ADF_OBJ_OVERLAY_ENGINE:
+               ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+                               fpriv, cmd, arg);
+               break;
+
+       case ADF_OBJ_INTERFACE:
+               ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+                               arg);
+               break;
+
+       case ADF_OBJ_DEVICE:
+               ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+               break;
+       }
+
+       return ret;
+}
+
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+       int head = fpriv->event_head;
+       int tail = fpriv->event_tail;
+       return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+       int head = fpriv->event_head;
+       int tail = fpriv->event_tail;
+       size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+       size_t space_to_end =
+                       CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+       if (space < event->length) {
+               dev_dbg(&fpriv->obj->dev,
+                               "insufficient buffer space for event %u\n",
+                               event->type);
+               return;
+       }
+
+       if (space_to_end >= event->length) {
+               memcpy(fpriv->event_buf + head, event, event->length);
+       } else {
+               memcpy(fpriv->event_buf + head, event, space_to_end);
+               memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+                               event->length - space_to_end);
+       }
+
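+       /* make the copied event data visible before publishing the new head */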
+       smp_wmb();
+       fpriv->event_head = (fpriv->event_head + event->length) &
+                       (sizeof(fpriv->event_buf) - 1);
+       wake_up_interruptible_all(&fpriv->event_wait);
+}
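+
+/*
+ * Worked example of the wrap-around above (event_buf is 4096 bytes, see
+ * adf_fops.h): with head = 4090, tail = 100 and a 16-byte event,
+ * CIRC_SPACE() = 105 and CIRC_SPACE_TO_END() = 6, so the first 6 bytes are
+ * written at the end of the ring, the remaining 10 at the start, and the
+ * new head becomes (4090 + 16) & 4095 = 10.
+ */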
+
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+               char __user *buffer, size_t buffer_size)
+{
+       int head, tail;
+       u8 *event_buf;
+       size_t cnt, cnt_to_end, copy_size = 0;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+                       GFP_KERNEL);
+       if (!event_buf)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+       if (!adf_file_event_available(fpriv))
+               goto out;
+
+       head = fpriv->event_head;
+       tail = fpriv->event_tail;
+
+       cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+       cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+       copy_size = min(buffer_size, cnt);
+
+       if (cnt_to_end >= copy_size) {
+               memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+       } else {
+               memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+               memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+                               copy_size - cnt_to_end);
+       }
+
+       fpriv->event_tail = (fpriv->event_tail + copy_size) &
+                       (sizeof(fpriv->event_buf) - 1);
+
+out:
+       spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+       if (copy_size) {
+               if (copy_to_user(buffer, event_buf, copy_size))
+                       ret = -EFAULT;
+               else
+                       ret = copy_size;
+       }
+       kfree(event_buf);
+       return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset)
+{
+       struct adf_file *fpriv = filp->private_data;
+       int err;
+
+       err = wait_event_interruptible(fpriv->event_wait,
+                       adf_file_event_available(fpriv));
+       if (err < 0)
+               return err;
+
+       return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct adf_file *fpriv = filp->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(filp, &fpriv->event_wait, wait);
+
+       if (adf_file_event_available(fpriv))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+
+const struct file_operations adf_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = adf_file_compat_ioctl,
+#endif
+       .open = adf_file_open,
+       .release = adf_file_release,
+       .llseek = default_llseek,
+       .read = adf_file_read,
+       .poll = adf_file_poll,
+};
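The read()/poll() pair above exposes the event ring as a stream of variable-length records, each led by a struct adf_event header whose length field covers the whole record. A minimal userspace sketch of the intended consumer follows; it assumes the uapi definitions from <video/adf.h> (the 'type' and 'length' fields used by the producer code above), an already-open ADF node created by the devnode callbacks later in this series, and that the relevant event types have already been enabled through the ADF ioctl interface, which is not part of this hunk:

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <video/adf.h>          /* uapi struct adf_event (type, length) */

/* Sketch only: drain pending events from an open ADF file descriptor.
 * Records split across two read() calls are not handled here.
 */
static void drain_adf_events(int fd)
{
	uint8_t buf[4096];
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));
		ssize_t off = 0;

		while (off + (ssize_t)sizeof(struct adf_event) <= n) {
			struct adf_event *ev = (struct adf_event *)(buf + off);

			if (ev->length < sizeof(*ev) || off + ev->length > n)
				break;  /* malformed or truncated record */
			printf("adf event: type=%u length=%u\n",
			       (unsigned)ev->type, (unsigned)ev->length);
			off += ev->length;
		}
	}
}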
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644 (file)
index 0000000..90a3a74
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+       struct list_head head;
+       struct adf_obj *obj;
+
+       DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
+       u8 event_buf[4096];
+       int event_head;
+       int event_tail;
+       wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644 (file)
index 0000000..d299a81
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
+long adf_compat_post_config(struct file *file,
+               struct adf_post_config32 __user *arg)
+{
+       struct adf_post_config32 cfg32;
+       struct adf_post_config __user *cfg;
+       int ret;
+
+       if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+               return -EFAULT;
+
+       cfg = compat_alloc_user_space(sizeof(*cfg));
+       if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+               return -EFAULT;
+
+       if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+                       put_user(compat_ptr(cfg32.interfaces),
+                                       &cfg->interfaces) ||
+                       put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+                       put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+                       put_user(cfg32.custom_data_size,
+                                       &cfg->custom_data_size) ||
+                       put_user(compat_ptr(cfg32.custom_data),
+                                       &cfg->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+                       sizeof(cfg->complete_fence)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+               struct adf_device_data32 __user *arg)
+{
+       struct adf_device_data32 data32;
+       struct adf_device_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_attachments, &data->n_attachments) ||
+                       put_user(compat_ptr(data32.attachments),
+                                       &data->attachments) ||
+                       put_user(data32.n_allowed_attachments,
+                                       &data->n_allowed_attachments) ||
+                       put_user(compat_ptr(data32.allowed_attachments),
+                                       &data->allowed_attachments) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->n_attachments, &data->n_attachments,
+                                       sizeof(arg->n_attachments)) ||
+                       copy_in_user(&arg->n_allowed_attachments,
+                                       &data->n_allowed_attachments,
+                                       sizeof(arg->n_allowed_attachments)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+               struct adf_interface_data32 __user *arg)
+{
+       struct adf_interface_data32 data32;
+       struct adf_interface_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+                       put_user(compat_ptr(data32.available_modes),
+                                       &data->available_modes) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->type, &data->type,
+                                       sizeof(arg->type)) ||
+                       copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+                       copy_in_user(&arg->flags, &data->flags,
+                                       sizeof(arg->flags)) ||
+                       copy_in_user(&arg->dpms_state, &data->dpms_state,
+                                       sizeof(arg->dpms_state)) ||
+                       copy_in_user(&arg->hotplug_detect,
+                                       &data->hotplug_detect,
+                                       sizeof(arg->hotplug_detect)) ||
+                       copy_in_user(&arg->width_mm, &data->width_mm,
+                                       sizeof(arg->width_mm)) ||
+                       copy_in_user(&arg->height_mm, &data->height_mm,
+                                       sizeof(arg->height_mm)) ||
+                       copy_in_user(&arg->current_mode, &data->current_mode,
+                                       sizeof(arg->current_mode)) ||
+                       copy_in_user(&arg->n_available_modes,
+                                       &data->n_available_modes,
+                                       sizeof(arg->n_available_modes)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+               struct adf_overlay_engine_data32 __user *arg)
+{
+       struct adf_overlay_engine_data32 data32;
+       struct adf_overlay_engine_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+                       put_user(compat_ptr(data32.supported_formats),
+                                       &data->supported_formats) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+                       (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->n_supported_formats,
+                                       &data->n_supported_formats,
+                                       sizeof(arg->n_supported_formats)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_POST_CONFIG32:
+               return adf_compat_post_config(file, compat_ptr(arg));
+
+       case ADF_GET_DEVICE_DATA32:
+               return adf_compat_get_device_data(file, compat_ptr(arg));
+
+       case ADF_GET_INTERFACE_DATA32:
+               return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+       case ADF_GET_OVERLAY_ENGINE_DATA32:
+               return adf_compat_get_overlay_engine_data(file,
+                               compat_ptr(arg));
+
+       default:
+               return adf_file_ioctl(file, cmd, arg);
+       }
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644 (file)
index 0000000..64034ce
--- /dev/null
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+               _IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+       compat_size_t n_interfaces;
+       compat_uptr_t interfaces;
+
+       compat_size_t n_bufs;
+       compat_uptr_t bufs;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+
+       __s32 complete_fence;
+};
+
+struct adf_device_data32 {
+       char name[ADF_NAME_LEN];
+
+       compat_size_t n_attachments;
+       compat_uptr_t attachments;
+
+       compat_size_t n_allowed_attachments;
+       compat_uptr_t allowed_attachments;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+       char name[ADF_NAME_LEN];
+
+       __u8 type;
+       __u32 id;
+       /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+       __u32 flags;
+
+       __u8 dpms_state;
+       __u8 hotplug_detect;
+       __u16 width_mm;
+       __u16 height_mm;
+
+       struct drm_mode_modeinfo current_mode;
+       compat_size_t n_available_modes;
+       compat_uptr_t available_modes;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+       char name[ADF_NAME_LEN];
+
+       compat_size_t n_supported_formats;
+       compat_uptr_t supported_formats;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644 (file)
index 0000000..e3f22c7
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_XBGR4444:
+       case DRM_FORMAT_RGBX4444:
+       case DRM_FORMAT_BGRX4444:
+       case DRM_FORMAT_ARGB4444:
+       case DRM_FORMAT_ABGR4444:
+       case DRM_FORMAT_RGBA4444:
+       case DRM_FORMAT_BGRA4444:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_AYUV:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return true;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               return true;
+
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 3;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+               return 8;
+
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               return 16;
+
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               return 24;
+
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               return 32;
+
+       default:
+               pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+               return 0;
+       }
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+       if (plane >= adf_format_num_planes(format))
+               return 0;
+
+       switch (format) {
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               return 2;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return plane ? 2 : 1;
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 1;
+       default:
+               return adf_format_bpp(format) / 8;
+       }
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
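Taken together, these helpers let a driver derive per-plane buffer sizes from a fourcc code alone. As a hypothetical example (the function below is not part of the patch), the byte size of one plane of a w x h buffer can be computed as follows; for DRM_FORMAT_NV12 at 1920x1080 this yields 1920*1080 bytes for the luma plane and 960*540*2 bytes for the interleaved chroma plane:

#include <drm/drm_fourcc.h>
#include <video/adf_format.h>

/* Hypothetical helper built on the accessors above: size in bytes of one
 * plane of a w x h buffer. Chroma subsampling only applies to the
 * non-luma planes, hence the 'plane ?' guards.
 */
static size_t example_plane_size(u32 format, u32 w, u32 h, int plane)
{
	u32 hsub = plane ? adf_format_horz_chroma_subsampling(format) : 1;
	u32 vsub = plane ? adf_format_vert_chroma_subsampling(format) : 1;

	return (size_t)(w / hsub) * (h / vsub) *
			adf_format_plane_cpp(format, plane);
}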
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644 (file)
index 0000000..285218a
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+       phys_addr_t base;
+};
+
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+               enum dma_data_direction direction)
+{
+       struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+       unsigned long pfn = PFN_DOWN(pdata->base);
+       struct page *page = pfn_to_page(pfn);
+       struct sg_table *table;
+       int nents, ret;
+
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret < 0)
+               goto err_alloc;
+
+       sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+
+       nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
+       if (!nents) {
+               ret = -EINVAL;
+               goto err_map;
+       }
+
+       return table;
+
+err_map:
+       sg_free_table(table);
+err_alloc:
+       kfree(table);
+       return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+               struct sg_table *table, enum dma_data_direction direction)
+{
+       dma_unmap_sg(attach->dev, table->sgl, 1, direction);
+       sg_free_table(table);
+}
+
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+       int err = memblock_free(pdata->base, buf->size);
+
+       if (err < 0)
+               pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+       kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+               bool atomic)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+       unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+       struct page *page = pfn_to_page(pfn);
+
+       if (atomic)
+               return kmap_atomic(page);
+       else
+               return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+               unsigned long pgoffset)
+{
+       return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+               unsigned long pgoffset, void *vaddr)
+{
+       kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+       return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+               void *vaddr)
+{
+       kunmap(vaddr);
+}
+
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+
+       return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+                       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+struct dma_buf_ops adf_memblock_ops = {
+       .map_dma_buf = adf_memblock_map,
+       .unmap_dma_buf = adf_memblock_unmap,
+       .release = adf_memblock_release,
+       .kmap_atomic = adf_memblock_kmap_atomic,
+       .kunmap_atomic = adf_memblock_kunmap_atomic,
+       .kmap = adf_memblock_kmap,
+       .kunmap = adf_memblock_kunmap,
+       .mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+       struct adf_memblock_pdata *pdata;
+       struct dma_buf *buf;
+       DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+       if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+               return ERR_PTR(-EINVAL);
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       pdata->base = base;
+       exp_info.ops = &adf_memblock_ops;
+       exp_info.size = size;
+       exp_info.flags = flags;
+       exp_info.priv = pdata;
+
+       buf = dma_buf_export(&exp_info);
+       if (IS_ERR(buf))
+               kfree(pdata);
+
+       return buf;
+}
+EXPORT_SYMBOL(adf_memblock_export);
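A hedged sketch of the calling pattern adf_memblock_export() is designed for: a driver that knows the physical base and size of a boot-time memblock carve-out (hypothetical values here, e.g. parsed from platform data or a reserved-memory node, both page-aligned as required above) exports the region and hands the resulting dma-buf to its clients, dropping it with dma_buf_put() when done:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <video/adf_memblock.h>         /* assumed to declare adf_memblock_export() */

/* Hypothetical caller: export a page-aligned framebuffer carve-out.
 * On success the returned dma-buf owns the region and releases it back
 * to memblock through adf_memblock_release() when the last reference
 * drops; on failure the ERR_PTR is propagated unchanged.
 */
static struct dma_buf *example_export_fb(phys_addr_t fb_base, size_t fb_size)
{
	return adf_memblock_export(fb_base, fb_size, O_CLOEXEC | O_RDWR);
}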
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644 (file)
index 0000000..8c659c7
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+       adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
+static ssize_t dpms_state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%u\n",
+                       adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       u8 dpms_state;
+       int err;
+
+       err = kstrtou8(buf, 0, &dpms_state);
+       if (err < 0)
+               return err;
+
+       err = adf_interface_blank(intf, dpms_state);
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       struct drm_mode_modeinfo mode;
+
+       adf_interface_current_mode(intf, &mode);
+
+       if (mode.name[0]) {
+               return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+       } else {
+               bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+               return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+                               mode.vdisplay, interlaced ? "i" : "");
+       }
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%s\n",
+                       adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       ktime_t timestamp;
+       unsigned long flags;
+
+       read_lock_irqsave(&intf->vsync_lock, flags);
+       memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+       read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+       __ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+       __ATTR_RO(current_mode),
+       __ATTR_RO(hotplug_detect),
+       __ATTR_RO(type),
+       __ATTR_RO(vsync_timestamp),
+};
+
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+       int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: allocating adf minor failed: %d\n", __func__,
+                               ret);
+               return ret;
+       }
+
+       obj->minor = ret;
+       obj->dev.parent = parent;
+       obj->dev.class = adf_class;
+       obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+       ret = device_register(&obj->dev);
+       if (ret < 0) {
+               pr_err("%s: registering adf object failed: %d\n", __func__,
+                               ret);
+               goto err_device_register;
+       }
+
+       return 0;
+
+err_device_register:
+       idr_remove(&adf_minors, obj->minor);
+       return ret;
+}
+
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       struct adf_interface *intf = adf_obj_to_interface(obj);
+       struct adf_device *parent = adf_interface_parent(intf);
+       return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+                       parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+       return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+                       parent->base.id, eng->base.id);
+}
+
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+       .name = "adf_device",
+       .devnode = adf_device_devnode,
+       .release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+       .name = "adf_interface",
+       .devnode = adf_interface_devnode,
+       .release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+       .name = "adf_overlay_engine",
+       .devnode = adf_overlay_engine_devnode,
+       .release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+       dev->base.dev.type = &adf_device_type;
+       dev_set_name(&dev->base.dev, "%s", dev->base.name);
+       return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+       struct adf_device *parent = adf_interface_parent(intf);
+       size_t i, j;
+       int ret;
+
+       intf->base.dev.type = &adf_interface_type;
+       dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+                       intf->base.id);
+
+       ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+               ret = device_create_file(&intf->base.dev,
+                               &adf_interface_attrs[i]);
+               if (ret < 0) {
+                       dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+                                       adf_interface_attrs[i].attr.name, ret);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (j = 0; j < i; j++)
+               device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+       return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+       eng->base.dev.type = &adf_overlay_engine_type;
+       dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+                       eng->base.id);
+
+       return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+       return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+       idr_remove(&adf_minors, obj->minor);
+       device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+       adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+               device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+       adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+       adf_obj_sysfs_destroy(&eng->base);
+}
+
+int adf_sysfs_init(void)
+{
+       struct class *class;
+       int ret;
+
+       class = class_create(THIS_MODULE, "adf");
+       if (IS_ERR(class)) {
+               ret = PTR_ERR(class);
+               pr_err("%s: creating class failed: %d\n", __func__, ret);
+               return ret;
+       }
+
+       ret = register_chrdev(0, "adf", &adf_fops);
+       if (ret < 0) {
+               pr_err("%s: registering device failed: %d\n", __func__, ret);
+               goto err_chrdev;
+       }
+
+       adf_class = class;
+       adf_major = ret;
+       return 0;
+
+err_chrdev:
+       class_destroy(adf_class);
+       return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+       idr_destroy(&adf_minors);
+       class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644 (file)
index 0000000..0613ac3
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644 (file)
index 0000000..3cb2a84
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
+TRACE_EVENT(adf_event,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type),
+
+       TP_STRUCT__entry(
+               __string(name, obj->name)
+               __field(enum adf_event_type, type)
+               __array(char, type_str, 32)
+       ),
+       TP_fast_assign(
+               __assign_str(name, obj->name);
+               __entry->type = type;
+               strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+                               sizeof(__entry->type_str));
+       ),
+       TP_printk("obj=%s type=%u (%s)",
+                       __get_str(name),
+                       __entry->type,
+                       __entry->type_str)
+);
+
+TRACE_EVENT(adf_event_enable,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type),
+
+       TP_STRUCT__entry(
+               __string(name, obj->name)
+               __field(enum adf_event_type, type)
+               __array(char, type_str, 32)
+       ),
+       TP_fast_assign(
+               __assign_str(name, obj->name);
+               __entry->type = type;
+               strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+                               sizeof(__entry->type_str));
+       ),
+       TP_printk("obj=%s type=%u (%s)",
+                       __get_str(name),
+                       __entry->type,
+                       __entry->type_str)
+);
+
+TRACE_EVENT(adf_event_disable,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type),
+
+       TP_STRUCT__entry(
+               __string(name, obj->name)
+               __field(enum adf_event_type, type)
+               __array(char, type_str, 32)
+       ),
+       TP_fast_assign(
+               __assign_str(name, obj->name);
+               __entry->type = type;
+               strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+                               sizeof(__entry->type_str));
+       ),
+       TP_printk("obj=%s type=%u (%s)",
+                       __get_str(name),
+                       __entry->type,
+                       __entry->type_str)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>
index 7f6c9e6cfc6c99d8d9912db5d2f78242923f51c5..1e56b50e408234f826fd28fdaa1bee0807911dcf 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
+#include <linux/acpi.h>
 
 enum {
        FB_GET_WIDTH        = 0x00,
@@ -234,7 +235,7 @@ static int goldfish_fb_probe(struct platform_device *pdev)
        fb->fb.var.activate     = FB_ACTIVATE_NOW;
        fb->fb.var.height       = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
        fb->fb.var.width        = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
-       fb->fb.var.pixclock     = 10000;
+       fb->fb.var.pixclock     = 0;
 
        fb->fb.var.red.offset = 11;
        fb->fb.var.red.length = 5;
@@ -304,12 +305,25 @@ static int goldfish_fb_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id goldfish_fb_of_match[] = {
+       { .compatible = "google,goldfish-fb", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
+
+static const struct acpi_device_id goldfish_fb_acpi_match[] = {
+       { "GFSH0004", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
 
 static struct platform_driver goldfish_fb_driver = {
        .probe          = goldfish_fb_probe,
        .remove         = goldfish_fb_remove,
        .driver = {
-               .name = "goldfish_fb"
+               .name = "goldfish_fb",
+               .of_match_table = goldfish_fb_of_match,
+               .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match),
        }
 };
 
index b05e8fefbabd98d379797b6dfc05744ad248a584..03b2f8f416073afb41faa4f13c117589dc7d34c6 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/ds2482.h>
 #include <asm/delay.h>
 
 #include "../w1.h"
@@ -84,7 +86,8 @@ static const u8 ds2482_chan_rd[8] =
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id);
 static int ds2482_remove(struct i2c_client *client);
-
+static int ds2482_suspend(struct device *dev);
+static int ds2482_resume(struct device *dev);
 
 /**
  * Driver data (common to all clients)
@@ -95,9 +98,15 @@ static const struct i2c_device_id ds2482_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, ds2482_id);
 
+static const struct dev_pm_ops ds2482_pm_ops = {
+       .suspend = ds2482_suspend,
+       .resume = ds2482_resume,
+};
+
 static struct i2c_driver ds2482_driver = {
        .driver = {
                .name   = "ds2482",
+               .pm = &ds2482_pm_ops,
        },
        .probe          = ds2482_probe,
        .remove         = ds2482_remove,
@@ -119,6 +128,7 @@ struct ds2482_w1_chan {
 struct ds2482_data {
        struct i2c_client       *client;
        struct mutex            access_lock;
+       int                     slpz_gpio;
 
        /* 1-wire interface(s) */
        int                     w1_count;       /* 1 or 8 */
@@ -444,11 +454,31 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
        return retval;
 }
 
+static int ds2482_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds2482_data *data = i2c_get_clientdata(client);
+
+       if (data->slpz_gpio >= 0)
+               gpio_set_value(data->slpz_gpio, 0);
+       return 0;
+}
+
+static int ds2482_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds2482_data *data = i2c_get_clientdata(client);
+
+       if (data->slpz_gpio >= 0)
+               gpio_set_value(data->slpz_gpio, 1);
+       return 0;
+}
 
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct ds2482_data *data;
+       struct ds2482_platform_data *pdata;
        int err = -ENODEV;
        int temp1;
        int idx;
@@ -515,6 +545,16 @@ static int ds2482_probe(struct i2c_client *client,
                }
        }
 
+       pdata = client->dev.platform_data;
+       data->slpz_gpio = pdata ? pdata->slpz_gpio : -1;
+
+       if (data->slpz_gpio >= 0) {
+               err = gpio_request_one(data->slpz_gpio, GPIOF_OUT_INIT_HIGH,
+                                      "ds2482.slpz");
+               if (err < 0)
+                       goto exit_w1_remove;
+       }
+
        return 0;
 
 exit_w1_remove:
@@ -539,6 +579,11 @@ static int ds2482_remove(struct i2c_client *client)
                        w1_remove_master_device(&data->w1_ch[idx].w1_bm);
        }
 
+       if (data->slpz_gpio >= 0) {
+               gpio_set_value(data->slpz_gpio, 0);
+               gpio_free(data->slpz_gpio);
+       }
+
        /* Free the memory */
        kfree(data);
        return 0;
index 6ce72d8d1ee12473bfcb1e550967be2a0a1c8d27..a5d2dc39ba07589b58d38522bf79ba46ba0f9eed 100644 (file)
@@ -199,6 +199,7 @@ if MISC_FILESYSTEMS
 source "fs/adfs/Kconfig"
 source "fs/affs/Kconfig"
 source "fs/ecryptfs/Kconfig"
+source "fs/sdcardfs/Kconfig"
 source "fs/hfs/Kconfig"
 source "fs/hfsplus/Kconfig"
 source "fs/befs/Kconfig"
index 79f522575cba3e79e6909ca4c1c055d2cb54ce9a..3b54070cd6293e723c32cb184f20f39c1de6b2d8 100644 (file)
@@ -3,7 +3,7 @@
 #
 # 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
 # Rewritten to use lists instead of if-statements.
-# 
+#
 
 obj-y :=       open.o read_write.o file_table.o super.o \
                char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
@@ -59,7 +59,7 @@ obj-y                         += devpts/
 
 obj-$(CONFIG_PROFILING)                += dcookies.o
 obj-$(CONFIG_DLM)              += dlm/
+
 # Do not add any filesystems before this line
 obj-$(CONFIG_FSCACHE)          += fscache/
 obj-$(CONFIG_REISERFS_FS)      += reiserfs/
@@ -81,6 +81,7 @@ obj-$(CONFIG_ISO9660_FS)      += isofs/
 obj-$(CONFIG_HFSPLUS_FS)       += hfsplus/ # Before hfs to find wrapped HFS+
 obj-$(CONFIG_HFS_FS)           += hfs/
 obj-$(CONFIG_ECRYPT_FS)                += ecryptfs/
+obj-$(CONFIG_SDCARD_FS)                += sdcardfs/
 obj-$(CONFIG_VXFS_FS)          += freevxfs/
 obj-$(CONFIG_NFS_FS)           += nfs/
 obj-$(CONFIG_EXPORTFS)         += exportfs/
index 3a93755e880fee23fa6d8370916caefb21f2c9eb..0c52941dd62c0d8ccb6d16b41472d9d6ad28fe5f 100644 (file)
@@ -651,7 +651,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               random_variable = (unsigned long) get_random_int();
+               random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
index 71b6056ad35dc0765c5d188ba30f2c3534495327..9e5099997fcd8da473bbdf50211873108c4f7194 100644 (file)
@@ -3023,6 +3023,7 @@ char *d_absolute_path(const struct path *path,
                return ERR_PTR(error);
        return res;
 }
+EXPORT_SYMBOL(d_absolute_path);
 
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
index 1e009cad8d5cac61832ebb3916033780f2fec1a6..3ab9c68b8bcee45fee7386140194da2f7d7b859d 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1587,7 +1588,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
        int res = 0, eavail, timed_out = 0;
        unsigned long flags;
-       long slack = 0;
+       u64 slack = 0;
        wait_queue_t wait;
        ktime_t expires, *to = NULL;
 
@@ -1634,7 +1635,8 @@ fetch_events:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                       if (!freezable_schedule_hrtimeout_range(to, slack,
+                                                               HRTIMER_MODE_ABS))
                                timed_out = 1;
 
                        spin_lock_irqsave(&ep->lock, flags);
index cd5914495ad71433556cf6cd26a26a54486aebef..9f31991a5e05c9f4b0cd413d7d954cb98f833fa9 100644 (file)
@@ -2465,7 +2465,8 @@ extern int ext4_mb_add_groupinfo(struct super_block *sb,
                ext4_group_t i, struct ext4_group_desc *desc);
 extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
                                ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+                               unsigned long blkdev_flags);
 
 /* inode.c */
 int ext4_inode_is_fast_symlink(struct inode *inode);
index d884989cc83dd99238a710f8131ab38b1139c7ca..af34979684a4df6f7f35f22a4cf30bcb57007a1b 100644 (file)
@@ -18,6 +18,7 @@
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
+#include <trace/events/android_fs.h>
 
 #define EXT4_XATTR_SYSTEM_DATA "data"
 #define EXT4_MIN_INLINE_DATA_SIZE      ((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -500,6 +501,9 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
                return -EAGAIN;
        }
 
+       trace_android_fs_dataread_start(inode, page_offset(page), PAGE_SIZE,
+                                       current->pid, current->comm);
+
        /*
         * Current inline data can only exist in the 1st page,
         * So for all the other pages, just set them uptodate.
@@ -511,6 +515,8 @@ int ext4_readpage_inline(struct inode *inode, struct page *page)
                SetPageUptodate(page);
        }
 
+       trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE);
+
        up_read(&EXT4_I(inode)->xattr_sem);
 
        unlock_page(page);
index c71d2941a45be1f1d2eeaed661a7f34af0293866..a4149cc2c6c0e329e2060ce83b5644bf46cfcf50 100644 (file)
@@ -44,6 +44,7 @@
 #include "truncate.h"
 
 #include <trace/events/ext4.h>
+#include <trace/events/android_fs.h>
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
@@ -1016,6 +1017,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index;
        unsigned from, to;
 
+       trace_android_fs_datawrite_start(inode, pos, len,
+                                        current->pid, current->comm);
        trace_ext4_write_begin(inode, pos, len, flags);
        /*
         * Reserve one block more for addition to orphan list in case
@@ -1152,6 +1155,7 @@ static int ext4_write_end(struct file *file,
        int ret = 0, ret2;
        int i_size_changed = 0;
 
+       trace_android_fs_datawrite_end(inode, pos, len);
        trace_ext4_write_end(inode, pos, len, copied);
        if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
                ret = ext4_jbd2_file_inode(handle, inode);
@@ -1260,6 +1264,7 @@ static int ext4_journalled_write_end(struct file *file,
        unsigned from, to;
        int size_changed = 0;
 
+       trace_android_fs_datawrite_end(inode, pos, len);
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
@@ -2727,6 +2732,8 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                        len, flags, pagep, fsdata);
        }
        *fsdata = (void *)0;
+       trace_android_fs_datawrite_start(inode, pos, len,
+                                        current->pid, current->comm);
        trace_ext4_da_write_begin(inode, pos, len, flags);
 
        if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -2845,6 +2852,7 @@ static int ext4_da_write_end(struct file *file,
                return ext4_write_end(file, mapping, pos,
                                      len, copied, page, fsdata);
 
+       trace_android_fs_datawrite_end(inode, pos, len);
        trace_ext4_da_write_end(inode, pos, len, copied);
        start = pos & (PAGE_CACHE_SIZE - 1);
        end = start + copied - 1;
@@ -3333,12 +3341,31 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
        if (ext4_has_inline_data(inode))
                return 0;
 
+       if (trace_android_fs_dataread_start_enabled() &&
+           (iov_iter_rw(iter) == READ))
+               trace_android_fs_dataread_start(inode, offset, count,
+                                               current->pid,
+                                               current->comm);
+       if (trace_android_fs_datawrite_start_enabled() &&
+           (iov_iter_rw(iter) == WRITE))
+               trace_android_fs_datawrite_start(inode, offset, count,
+                                                current->pid,
+                                                current->comm);
+
        trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                ret = ext4_ext_direct_IO(iocb, iter, offset);
        else
                ret = ext4_ind_direct_IO(iocb, iter, offset);
        trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
+
+       if (trace_android_fs_dataread_start_enabled() &&
+           (iov_iter_rw(iter) == READ))
+               trace_android_fs_dataread_end(inode, offset, count);
+       if (trace_android_fs_datawrite_start_enabled() &&
+           (iov_iter_rw(iter) == WRITE))
+               trace_android_fs_datawrite_end(inode, offset, count);
+
        return ret;
 }
 
index 1fb12f9c97a6b70467700c03327f442a1a273481..7e974878d9a9ca1774acf59059bf9cd88ebf890e 100644 (file)
@@ -587,11 +587,13 @@ resizefs_out:
                return err;
        }
 
+       case FIDTRIM:
        case FITRIM:
        {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                struct fstrim_range range;
                int ret = 0;
+               int flags  = cmd == FIDTRIM ? BLKDEV_DISCARD_SECURE : 0;
 
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
@@ -599,13 +601,15 @@ resizefs_out:
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
 
+               if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secdiscard(q))
+                       return -EOPNOTSUPP;
                if (copy_from_user(&range, (struct fstrim_range __user *)arg,
                    sizeof(range)))
                        return -EFAULT;
 
                range.minlen = max((unsigned int)range.minlen,
                                   q->limits.discard_granularity);
-               ret = ext4_trim_fs(sb, &range);
+               ret = ext4_trim_fs(sb, &range, flags);
                if (ret < 0)
                        return ret;
 
index 3c7f0c44cfb361c85127a7c4593ba41ee3e48a3e..0b1c97875686bbe2732c0dbb9cb2a131c4d52c80 100644 (file)
@@ -2770,7 +2770,8 @@ int ext4_mb_release(struct super_block *sb)
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
-               ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+               ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+               unsigned long flags)
 {
        ext4_fsblk_t discard_block;
 
@@ -2779,7 +2780,7 @@ static inline int ext4_issue_discard(struct super_block *sb,
        count = EXT4_C2B(EXT4_SB(sb), count);
        trace_ext4_discard_blocks(sb,
                        (unsigned long long) discard_block, count);
-       return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+       return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
 }
 
 /*
@@ -2801,7 +2802,7 @@ static void ext4_free_data_callback(struct super_block *sb,
        if (test_opt(sb, DISCARD)) {
                err = ext4_issue_discard(sb, entry->efd_group,
                                         entry->efd_start_cluster,
-                                        entry->efd_count);
+                                        entry->efd_count, 0);
                if (err && err != -EOPNOTSUPP)
                        ext4_msg(sb, KERN_WARNING, "discard request in"
                                 " group:%d block:%d count:%d failed"
@@ -4846,7 +4847,8 @@ do_more:
                 * them with group lock_held
                 */
                if (test_opt(sb, DISCARD)) {
-                       err = ext4_issue_discard(sb, block_group, bit, count);
+                       err = ext4_issue_discard(sb, block_group, bit, count,
+                                                0);
                        if (err && err != -EOPNOTSUPP)
                                ext4_msg(sb, KERN_WARNING, "discard request in"
                                         " group:%d block:%d count:%lu failed"
@@ -5042,13 +5044,15 @@ error_return:
  * @count:     number of blocks to TRIM
  * @group:     alloc. group we are working with
  * @e4b:       ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
  *
  * Trim "count" blocks starting at "start" in the "group". To assure that no
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
-                            ext4_group_t group, struct ext4_buddy *e4b)
+                           ext4_group_t group, struct ext4_buddy *e4b,
+                           unsigned long blkdev_flags)
 __releases(bitlock)
 __acquires(bitlock)
 {
@@ -5069,7 +5073,7 @@ __acquires(bitlock)
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
-       ret = ext4_issue_discard(sb, group, start, count);
+       ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
        return ret;
@@ -5082,6 +5086,7 @@ __acquires(bitlock)
  * @start:             first group block to examine
  * @max:               last group block to examine
  * @minblocks:         minimum extent block count
+ * @blkdev_flags:      flags for the block device
  *
  * ext4_trim_all_free walks through group's buddy bitmap searching for free
  * extents. When the free block is found, ext4_trim_extent is called to TRIM
@@ -5096,7 +5101,7 @@ __acquires(bitlock)
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                   ext4_grpblk_t start, ext4_grpblk_t max,
-                  ext4_grpblk_t minblocks)
+                  ext4_grpblk_t minblocks, unsigned long blkdev_flags)
 {
        void *bitmap;
        ext4_grpblk_t next, count = 0, free_count = 0;
@@ -5129,7 +5134,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 
                if ((next - start) >= minblocks) {
                        ret = ext4_trim_extent(sb, start,
-                                              next - start, group, &e4b);
+                                              next - start, group, &e4b,
+                                              blkdev_flags);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                        ret = 0;
@@ -5171,6 +5177,7 @@ out:
  * ext4_trim_fs() -- trim ioctl handle function
  * @sb:                        superblock for filesystem
  * @range:             fstrim_range structure
+ * @blkdev_flags:      flags for the block device
  *
  * start:      First Byte to trim
  * len:                number of Bytes to trim from start
@@ -5179,7 +5186,8 @@ out:
  * start to start+len. For each such a group ext4_trim_all_free function
  * is invoked to trim all free space.
  */
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+                       unsigned long blkdev_flags)
 {
        struct ext4_group_info *grp;
        ext4_group_t group, first_group, last_group;
@@ -5235,7 +5243,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
                if (grp->bb_free >= minlen) {
                        cnt = ext4_trim_all_free(sb, group, first_cluster,
-                                               end, minlen);
+                                               end, minlen, blkdev_flags);
                        if (cnt < 0) {
                                ret = cnt;
                                break;
index 5dc5e95063de2a7e42749a94464f00f7c50be4b8..1ce24a6759a090de564813b6639e011da10b3271 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/cleancache.h>
 
 #include "ext4.h"
+#include <trace/events/android_fs.h>
 
 /*
  * Call ext4_decrypt on every single page, reusing the encryption
@@ -86,6 +87,17 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
 #endif
 }
 
+static void
+ext4_trace_read_completion(struct bio *bio)
+{
+       struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+       if (first_page != NULL)
+               trace_android_fs_dataread_end(first_page->mapping->host,
+                                             page_offset(first_page),
+                                             bio->bi_iter.bi_size);
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -103,6 +115,9 @@ static void mpage_end_io(struct bio *bio)
        struct bio_vec *bv;
        int i;
 
+       if (trace_android_fs_dataread_start_enabled())
+               ext4_trace_read_completion(bio);
+
        if (ext4_bio_encrypted(bio)) {
                struct ext4_crypto_ctx *ctx = bio->bi_private;
 
@@ -130,6 +145,24 @@ static void mpage_end_io(struct bio *bio)
        bio_put(bio);
 }
 
+static void
+ext4_submit_bio_read(struct bio *bio)
+{
+       if (trace_android_fs_dataread_start_enabled()) {
+               struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+               if (first_page != NULL) {
+                       trace_android_fs_dataread_start(
+                               first_page->mapping->host,
+                               page_offset(first_page),
+                               bio->bi_iter.bi_size,
+                               current->pid,
+                               current->comm);
+               }
+       }
+       submit_bio(READ, bio);
+}
+
 int ext4_mpage_readpages(struct address_space *mapping,
                         struct list_head *pages, struct page *page,
                         unsigned nr_pages)
@@ -271,7 +304,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
-                       submit_bio(READ, bio);
+                       ext4_submit_bio_read(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
@@ -303,14 +336,14 @@ int ext4_mpage_readpages(struct address_space *mapping,
                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
-                       submit_bio(READ, bio);
+                       ext4_submit_bio_read(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
        confused:
                if (bio) {
-                       submit_bio(READ, bio);
+                       ext4_submit_bio_read(bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
@@ -323,6 +356,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
-               submit_bio(READ, bio);
+               ext4_submit_bio_read(bio);
        return 0;
 }
index 972eab7ac07193da485df3efc6b6c11c99dacd97..e692958d6e7859995d970d0364fbd36b1a7abd08 100644 (file)
@@ -26,6 +26,7 @@
 #include "segment.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 static void f2fs_read_end_io(struct bio *bio)
 {
@@ -1401,6 +1402,8 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        struct dnode_of_data dn;
        int err = 0;
 
+       trace_android_fs_datawrite_start(inode, pos, len,
+                                        current->pid, current->comm);
        trace_f2fs_write_begin(inode, pos, len, flags);
 
        f2fs_balance_fs(sbi);
@@ -1529,6 +1532,7 @@ static int f2fs_write_end(struct file *file,
 {
        struct inode *inode = page->mapping->host;
 
+       trace_android_fs_datawrite_end(inode, pos, len);
        trace_f2fs_write_end(inode, pos, len, copied);
 
        set_page_dirty(page);
@@ -1582,6 +1586,16 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 
        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
+       if (trace_android_fs_dataread_start_enabled() &&
+           (iov_iter_rw(iter) == READ))
+               trace_android_fs_dataread_start(inode, offset,
+                                               count, current->pid,
+                                               current->comm);
+       if (trace_android_fs_datawrite_start_enabled() &&
+           (iov_iter_rw(iter) == WRITE))
+               trace_android_fs_datawrite_start(inode, offset, count,
+                                                current->pid, current->comm);
+
        if (iov_iter_rw(iter) == WRITE) {
                __allocate_data_blocks(inode, offset, count);
                if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
@@ -1595,6 +1609,13 @@ out:
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
 
+       if (trace_android_fs_dataread_start_enabled() &&
+           (iov_iter_rw(iter) == READ))
+               trace_android_fs_dataread_end(inode, offset, count);
+       if (trace_android_fs_datawrite_start_enabled() &&
+           (iov_iter_rw(iter) == WRITE))
+               trace_android_fs_datawrite_end(inode, offset, count);
+
        trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
 
        return err;
index bda7126466c09f9b16f4d275cac5b287a06c9142..d2c5d69ba0b146bd2bd3a9b3a507f4e2655765ce 100644 (file)
@@ -13,6 +13,7 @@
 
 #include "f2fs.h"
 #include "node.h"
+#include <trace/events/android_fs.h>
 
 bool f2fs_may_inline_data(struct inode *inode)
 {
@@ -84,14 +85,22 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
 {
        struct page *ipage;
 
+       trace_android_fs_dataread_start(inode, page_offset(page),
+                                       PAGE_SIZE, current->pid,
+                                       current->comm);
+
        ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
        if (IS_ERR(ipage)) {
+               trace_android_fs_dataread_end(inode, page_offset(page),
+                                             PAGE_SIZE);
                unlock_page(page);
                return PTR_ERR(ipage);
        }
 
        if (!f2fs_has_inline_data(inode)) {
                f2fs_put_page(ipage, 1);
+               trace_android_fs_dataread_end(inode, page_offset(page),
+                                             PAGE_SIZE);
                return -EAGAIN;
        }
 
@@ -102,6 +111,8 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
 
        SetPageUptodate(page);
        f2fs_put_page(ipage, 1);
+       trace_android_fs_dataread_end(inode, page_offset(page),
+                                     PAGE_SIZE);
        unlock_page(page);
        return 0;
 }
index 60d6fc2e0e4b9f5ec4fabd5bfb257d91e9ee6b5e..de11206dda636029cb47fadc3e9c46b2075ce835 100644 (file)
@@ -2062,7 +2062,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
            (dirtytime && (inode->i_state & I_DIRTY_INODE)))
                return;
 
-       if (unlikely(block_dump))
+       if (unlikely(block_dump > 1))
                block_dump___mark_inode_dirty(inode);
 
        spin_lock(&inode->i_lock);
index ebb5e37455a07acd86f5fbf1b76d474e99b937fb..8932c06e40c14c8479de568274ec98316e85f00b 100644 (file)
 #include <linux/poll.h>
 #include <linux/uio.h>
 #include <linux/miscdevice.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
+#include <linux/freezer.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -473,7 +475,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
-       wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
+       while (!test_bit(FR_FINISHED, &req->flags))
+               wait_event_freezable(req->waitq,
+                               test_bit(FR_FINISHED, &req->flags));
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
@@ -1932,6 +1936,10 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
                cs->move_pages = 0;
 
        err = copy_out_args(cs, &req->out, nbytes);
+       if (req->in.h.opcode == FUSE_CANONICAL_PATH) {
+               req->out.h.error = kern_path((char *)req->out.args[0].value, 0,
+                                                       req->canonical_path);
+       }
        fuse_copy_finish(cs);
 
        spin_lock(&fpq->lock);
index 4b5f2c4e69c8aa1cb21f419f4a1052c517eb55b9..278caed7c36776426adc96d5f1e0274366470ed9 100644 (file)
@@ -267,6 +267,50 @@ invalid:
        goto out;
 }
 
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon; however, the userspace daemon cannot
+ * look up paths on its own. Instead, we handle the lookup as a special case
+ * inside the write request.
+ */
+static void fuse_dentry_canonical_path(const struct path *path, struct path *canonical_path) {
+       struct inode *inode = path->dentry->d_inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_req *req;
+       int err;
+       char *path_name;
+
+       req = fuse_get_req(fc, 1);
+       err = PTR_ERR(req);
+       if (IS_ERR(req))
+               goto default_path;
+
+       path_name = (char*)__get_free_page(GFP_KERNEL);
+       if (!path_name) {
+               fuse_put_request(fc, req);
+               goto default_path;
+       }
+
+       req->in.h.opcode = FUSE_CANONICAL_PATH;
+       req->in.h.nodeid = get_node_id(inode);
+       req->in.numargs = 0;
+       req->out.numargs = 1;
+       req->out.args[0].size = PATH_MAX;
+       req->out.args[0].value = path_name;
+       req->canonical_path = canonical_path;
+       req->out.argvar = 1;
+       fuse_request_send(fc, req);
+       err = req->out.h.error;
+       fuse_put_request(fc, req);
+       free_page((unsigned long)path_name);
+       if (!err)
+               return;
+default_path:
+       canonical_path->dentry = path->dentry;
+       canonical_path->mnt = path->mnt;
+       path_get(canonical_path);
+}
+
 static int invalid_nodeid(u64 nodeid)
 {
        return !nodeid || nodeid == FUSE_ROOT_ID;
@@ -274,6 +318,7 @@ static int invalid_nodeid(u64 nodeid)
 
 const struct dentry_operations fuse_dentry_operations = {
        .d_revalidate   = fuse_dentry_revalidate,
+       .d_canonical_path = fuse_dentry_canonical_path,
 };
 
 int fuse_valid_type(int m)
index 604cd42dafef2a41941febd3681699f35cdb5e52..644687ae04bd931285fa3ca894924c1506be1a21 100644 (file)
@@ -372,6 +372,9 @@ struct fuse_req {
        /** Inode used in the request or NULL */
        struct inode *inode;
 
+       /** Path used for completing d_canonical_path */
+       struct path *canonical_path;
+
        /** AIO control block */
        struct fuse_io_priv *io;
 
index 1480d3a180370fe3922a7724e613d09b896f9d00..5c65d8942692fd468558c9cac0052216af75a2bb 100644 (file)
 #include <linux/cleancache.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/android_fs.h>
+
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -47,6 +55,16 @@ static void mpage_end_io(struct bio *bio)
        struct bio_vec *bv;
        int i;
 
+       if (trace_android_fs_dataread_end_enabled() &&
+           (bio_data_dir(bio) == READ)) {
+               struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+               if (first_page != NULL)
+                       trace_android_fs_dataread_end(first_page->mapping->host,
+                                                     page_offset(first_page),
+                                                     bio->bi_iter.bi_size);
+       }
+
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
                page_endio(page, bio_data_dir(bio), bio->bi_error);
@@ -57,6 +75,18 @@ static void mpage_end_io(struct bio *bio)
 
 static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
+       if (trace_android_fs_dataread_start_enabled() && (rw == READ)) {
+               struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+               if (first_page != NULL) {
+                       trace_android_fs_dataread_start(
+                               first_page->mapping->host,
+                               page_offset(first_page),
+                               bio->bi_iter.bi_size,
+                               current->pid,
+                               current->comm);
+               }
+       }
        bio->bi_end_io = mpage_end_io;
        guard_bio_eod(rw, bio);
        submit_bio(rw, bio);
index b8d08d0d0a4dbe061b9336dd7f861d32e13c8618..e2893f17dde24e88b2ac9e17708179db4ce11911 100644 (file)
@@ -702,6 +702,8 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
        struct fsnotify_group *group;
        struct inode *inode;
        struct path path;
+       struct path alteredpath;
+       struct path *canonical_path = &path;
        struct fd f;
        int ret;
        unsigned flags = 0;
@@ -741,13 +743,22 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
        if (ret)
                goto fput_and_out;
 
+       /* support stacked filesystems */
+       if(path.dentry && path.dentry->d_op) {
+               if (path.dentry->d_op->d_canonical_path) {
+                       path.dentry->d_op->d_canonical_path(&path, &alteredpath);
+                       canonical_path = &alteredpath;
+                       path_put(&path);
+               }
+       }
+
        /* inode held in place by reference to path; group by fget on fd */
-       inode = path.dentry->d_inode;
+       inode = canonical_path->dentry->d_inode;
        group = f.file->private_data;
 
        /* create/update an inode mark */
        ret = inotify_update_watch(group, inode, mask);
-       path_put(&path);
+       path_put(canonical_path);
 fput_and_out:
        fdput(f);
        return ret;
index d2b8c754f627f55b7de6b4af51ef407a869c5276..0c9ea52ab3995829bd4b076bc6eba82297669ec8 100644 (file)
@@ -2240,6 +2240,92 @@ static const struct file_operations proc_timers_operations = {
        .release        = seq_release_private,
 };
 
+static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
+                                       size_t count, loff_t *offset)
+{
+       struct inode *inode = file_inode(file);
+       struct task_struct *p;
+       u64 slack_ns;
+       int err;
+
+       err = kstrtoull_from_user(buf, count, 10, &slack_ns);
+       if (err < 0)
+               return err;
+
+       p = get_proc_task(inode);
+       if (!p)
+               return -ESRCH;
+
+       if (p != current) {
+               if (!capable(CAP_SYS_NICE)) {
+                       count = -EPERM;
+                       goto out;
+               }
+
+               err = security_task_setscheduler(p);
+               if (err) {
+                       count = err;
+                       goto out;
+               }
+       }
+
+       task_lock(p);
+       if (slack_ns == 0)
+               p->timer_slack_ns = p->default_timer_slack_ns;
+       else
+               p->timer_slack_ns = slack_ns;
+       task_unlock(p);
+
+out:
+       put_task_struct(p);
+
+       return count;
+}
+
+static int timerslack_ns_show(struct seq_file *m, void *v)
+{
+       struct inode *inode = m->private;
+       struct task_struct *p;
+       int err = 0;
+
+       p = get_proc_task(inode);
+       if (!p)
+               return -ESRCH;
+
+       if (p != current) {
+
+               if (!capable(CAP_SYS_NICE)) {
+                       err = -EPERM;
+                       goto out;
+               }
+               err = security_task_getscheduler(p);
+               if (err)
+                       goto out;
+       }
+
+       task_lock(p);
+       seq_printf(m, "%llu\n", p->timer_slack_ns);
+       task_unlock(p);
+
+out:
+       put_task_struct(p);
+
+       return err;
+}
+
+static int timerslack_ns_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, timerslack_ns_show, inode);
+}
+
+static const struct file_operations proc_pid_set_timerslack_ns_operations = {
+       .open           = timerslack_ns_open,
+       .read           = seq_read,
+       .write          = timerslack_ns_write,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int proc_pident_instantiate(struct inode *dir,
        struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
@@ -2790,8 +2876,8 @@ static const struct pid_entry tgid_base_stuff[] = {
        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
        ONE("oom_score",  S_IRUGO, proc_oom_score),
-       REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-       REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+       REG("oom_adj",    S_IRUSR, proc_oom_adj_operations),
+       REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
        REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
        REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -2817,6 +2903,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_CHECKPOINT_RESTORE
        REG("timers",     S_IRUGO, proc_timers_operations),
 #endif
+       REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
 };
 
 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3073,6 +3160,44 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
        return 0;
 }
 
+/*
+ * proc_tid_comm_permission is a special permission function exclusively
+ * used for the node /proc/<pid>/task/<tid>/comm.
+ * It bypasses generic permission checks in the case where a task of the same
+ * task group attempts to access the node.
+ * The rationale behind this is that glibc and bionic access this node for
+ * cross-thread naming (pthread_set/getname_np(!self)). However, if
+ * PR_SET_DUMPABLE gets set to 0, this node, among others, becomes uid=0 gid=0,
+ * which locks out the cross-thread naming implementation.
+ * This function makes sure that the node is always accessible to members of
+ * the same thread group.
+ */
+static int proc_tid_comm_permission(struct inode *inode, int mask)
+{
+       bool is_same_tgroup;
+       struct task_struct *task;
+
+       task = get_proc_task(inode);
+       if (!task)
+               return -ESRCH;
+       is_same_tgroup = same_thread_group(current, task);
+       put_task_struct(task);
+
+       if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
+               /* This file (/proc/<pid>/task/<tid>/comm) can always be
+                * read or written by the members of the corresponding
+                * thread group.
+                */
+               return 0;
+       }
+
+       return generic_permission(inode, mask);
+}
+
+static const struct inode_operations proc_tid_comm_inode_operations = {
+               .permission = proc_tid_comm_permission,
+};
+
 /*
  * Tasks
  */
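
The permission override above targets the cross-thread naming case: glibc and bionic implement pthread_setname_np()/pthread_getname_np() on another thread by writing and reading /proc/self/task/<tid>/comm, which must remain accessible to thread-group members even after dumpability is dropped. A small sketch of that case (illustrative only):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

static void *worker(void *arg)
{
	(void)arg;
	sleep(1);
	return NULL;
}

int main(void)
{
	pthread_t t;
	char name[16];

	prctl(PR_SET_DUMPABLE, 0);		/* /proc/<pid> entries become root-owned */
	pthread_create(&t, NULL, worker, NULL);
	pthread_setname_np(t, "my-worker");	/* writes /proc/self/task/<tid>/comm */
	pthread_getname_np(t, name, sizeof(name));
	printf("thread name: %s\n", name);
	pthread_join(t, NULL);
	return 0;
}
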
@@ -3091,7 +3216,9 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_SCHED_DEBUG
        REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
-       REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+       NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
+                        &proc_tid_comm_inode_operations,
+                        &proc_pid_set_comm_operations, {}),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        ONE("syscall",   S_IRUSR, proc_pid_syscall),
 #endif
@@ -3138,8 +3265,8 @@ static const struct pid_entry tid_base_stuff[] = {
        ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
        ONE("oom_score", S_IRUGO, proc_oom_score),
-       REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-       REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+       REG("oom_adj",   S_IRUSR, proc_oom_adj_operations),
+       REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
        REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
        REG("sessionid",  S_IRUGO, proc_sessionid_operations),
index 92e6726f6e3732573bd9a64f3b313cc3508ce519..21f198aa0961967f724d8601efb139fe79f6c34b 100644 (file)
@@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 static ssize_t
 read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 {
+       char *buf = file->private_data;
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
@@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
-                       char * elf_buf;
-
-                       elf_buf = kzalloc(tsz, GFP_KERNEL);
-                       if (!elf_buf)
-                               return -ENOMEM;
-                       vread(elf_buf, (char *)start, tsz);
+                       vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
-                       if (copy_to_user(buffer, elf_buf, tsz)) {
-                               kfree(elf_buf);
+                       if (copy_to_user(buffer, buf, tsz))
                                return -EFAULT;
-                       }
-                       kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;
 
-                               n = copy_to_user(buffer, (char *)start, tsz);
+                               /*
+                                * Use a bounce buffer to bypass the
+                                * hardened usercopy checks on kernel text.
+                                */
+                               memcpy(buf, (char *) start, tsz);
+                               n = copy_to_user(buffer, buf, tsz);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
@@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
 {
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
+
+       filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!filp->private_data)
+               return -ENOMEM;
+
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
@@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static int release_kcore(struct inode *inode, struct file *file)
+{
+       kfree(file->private_data);
+       return 0;
+}
 
 static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
+       .release        = release_kcore,
        .llseek         = default_llseek,
 };
 
index d598b9c809c12f71b9d18b8baae631d4117264c2..200e3b29aa22d535a37b17e1d51f194a68e21c1e 100644 (file)
@@ -116,6 +116,56 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+       const char __user *name = vma_get_anon_name(vma);
+       struct mm_struct *mm = vma->vm_mm;
+
+       unsigned long page_start_vaddr;
+       unsigned long page_offset;
+       unsigned long num_pages;
+       unsigned long max_len = NAME_MAX;
+       int i;
+
+       page_start_vaddr = (unsigned long)name & PAGE_MASK;
+       page_offset = (unsigned long)name - page_start_vaddr;
+       num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+       seq_puts(m, "[anon:");
+
+       for (i = 0; i < num_pages; i++) {
+               int len;
+               int write_len;
+               const char *kaddr;
+               long pages_pinned;
+               struct page *page;
+
+               pages_pinned = get_user_pages(current, mm, page_start_vaddr,
+                               1, 0, 0, &page, NULL);
+               if (pages_pinned < 1) {
+                       seq_puts(m, "<fault>]");
+                       return;
+               }
+
+               kaddr = (const char *)kmap(page);
+               len = min(max_len, PAGE_SIZE - page_offset);
+               write_len = strnlen(kaddr + page_offset, len);
+               seq_write(m, kaddr + page_offset, write_len);
+               kunmap(page);
+               put_page(page);
+
+               /* if strnlen hit a null terminator then we're done */
+               if (write_len != len)
+                       break;
+
+               max_len -= len;
+               page_offset = 0;
+               page_start_vaddr += PAGE_SIZE;
+       }
+
+       seq_putc(m, ']');
+}
+
 static void vma_stop(struct proc_maps_private *priv)
 {
        struct mm_struct *mm = priv->mm;
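
seq_print_vma_name() pins and copies the user-supplied name string out of the task's own address space, so named anonymous mappings show up in /proc/<pid>/maps and smaps as "[anon:<name>]". The name itself is attached with the Android PR_SET_VMA prctl; a hedged sketch follows (the PR_SET_VMA constants below are the values used by that extension and are assumptions here, not taken from this diff):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41	/* assumed value */
#define PR_SET_VMA_ANON_NAME	0		/* assumed value */
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* After this, /proc/self/maps shows the region as [anon:demo]. */
	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, len, "demo");
	getchar();	/* pause so the maps entry can be inspected */
	return 0;
}
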
@@ -341,8 +391,14 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                        goto done;
                }
 
-               if (is_stack(priv, vma, is_pid))
+               if (is_stack(priv, vma, is_pid)) {
                        name = "[stack]";
+                       goto done;
+               }
+               if (vma_get_anon_name(vma)) {
+                       seq_pad(m, ' ');
+                       seq_print_vma_name(m, vma);
+               }
        }
 
 done:
@@ -667,6 +723,12 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 
        show_map_vma(m, vma, is_pid);
 
+       if (vma_get_anon_name(vma)) {
+               seq_puts(m, "Name:           ");
+               seq_print_vma_name(m, vma);
+               seq_putc(m, '\n');
+       }
+
        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
index 588461bb2dd482bc7906cb284c57b1e6ff92de87..40a0fe0a4e053abcc83a1195cb8a482f9b061811 100644 (file)
@@ -431,6 +431,40 @@ static int pstore_write_compat(enum pstore_type_id type,
                             size, psi);
 }
 
+static int pstore_write_buf_user_compat(enum pstore_type_id type,
+                              enum kmsg_dump_reason reason,
+                              u64 *id, unsigned int part,
+                              const char __user *buf,
+                              bool compressed, size_t size,
+                              struct pstore_info *psi)
+{
+       unsigned long flags = 0;
+       size_t i, bufsize = size;
+       long ret = 0;
+
+       if (unlikely(!access_ok(VERIFY_READ, buf, size)))
+               return -EFAULT;
+       if (bufsize > psinfo->bufsize)
+               bufsize = psinfo->bufsize;
+       spin_lock_irqsave(&psinfo->buf_lock, flags);
+       for (i = 0; i < size; ) {
+               size_t c = min(size - i, bufsize);
+
+               ret = __copy_from_user(psinfo->buf, buf + i, c);
+               if (unlikely(ret != 0)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               ret = psi->write_buf(type, reason, id, part, psinfo->buf,
+                                    compressed, c, psi);
+               if (unlikely(ret < 0))
+                       break;
+               i += c;
+       }
+       spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+       return unlikely(ret < 0) ? ret : size;
+}
+
 /*
  * platform specific persistent storage driver registers with
  * us here. If pstore is already mounted, call the platform
@@ -453,6 +487,8 @@ int pstore_register(struct pstore_info *psi)
 
        if (!psi->write)
                psi->write = pstore_write_compat;
+       if (!psi->write_buf_user)
+               psi->write_buf_user = pstore_write_buf_user_compat;
        psinfo = psi;
        mutex_init(&psinfo->read_mutex);
        spin_unlock(&pstore_lock);
index 7de20cd3797f1d3929f4abe2f9741c756259c7bd..78f6176c020f8256dad089221af85d9560e5ac2f 100644 (file)
 #include "internal.h"
 
 static DEFINE_MUTEX(pmsg_lock);
-#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
 
 static ssize_t write_pmsg(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
 {
-       size_t i, buffer_size;
-       char *buffer;
+       u64 id;
+       int ret;
 
        if (!count)
                return 0;
 
+       /* check outside lock, page in any data. write_buf_user also checks */
        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
 
-       buffer_size = count;
-       if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
-               buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
-       buffer = vmalloc(buffer_size);
-       if (!buffer)
-               return -ENOMEM;
-
        mutex_lock(&pmsg_lock);
-       for (i = 0; i < count; ) {
-               size_t c = min(count - i, buffer_size);
-               u64 id;
-               long ret;
-
-               ret = __copy_from_user(buffer, buf + i, c);
-               if (unlikely(ret != 0)) {
-                       mutex_unlock(&pmsg_lock);
-                       vfree(buffer);
-                       return -EFAULT;
-               }
-               psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
-                                 psinfo);
-
-               i += c;
-       }
-
+       ret = psinfo->write_buf_user(PSTORE_TYPE_PMSG, 0, &id, 0, buf, 0, count,
+                                    psinfo);
        mutex_unlock(&pmsg_lock);
-       vfree(buffer);
-       return count;
+       return ret ? ret : count;
 }
 
 static const struct file_operations pmsg_fops = {
index 905caba36529fe071971fff8bffd3ce22199bd83..8d1e5e2db6a1a772daeb6d923a25a189c5c3b6a5 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/pstore_ram.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 
 #define RAMOOPS_KERNMSG_HDR "===="
 #define MIN_MEM_SIZE 4096UL
@@ -329,6 +331,24 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
        return 0;
 }
 
+static int notrace ramoops_pstore_write_buf_user(enum pstore_type_id type,
+                                                enum kmsg_dump_reason reason,
+                                                u64 *id, unsigned int part,
+                                                const char __user *buf,
+                                                bool compressed, size_t size,
+                                                struct pstore_info *psi)
+{
+       if (type == PSTORE_TYPE_PMSG) {
+               struct ramoops_context *cxt = psi->data;
+
+               if (!cxt->mprz)
+                       return -ENOMEM;
+               return persistent_ram_write_user(cxt->mprz, buf, size);
+       }
+
+       return -EINVAL;
+}
+
 static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
                                struct timespec time, struct pstore_info *psi)
 {
@@ -367,6 +387,7 @@ static struct ramoops_context oops_cxt = {
                .open   = ramoops_pstore_open,
                .read   = ramoops_pstore_read,
                .write_buf      = ramoops_pstore_write_buf,
+               .write_buf_user = ramoops_pstore_write_buf_user,
                .erase  = ramoops_pstore_erase,
        },
 };
@@ -466,6 +487,97 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
        return 0;
 }
 
+void notrace ramoops_console_write_buf(const char *buf, size_t size)
+{
+       struct ramoops_context *cxt = &oops_cxt;
+       persistent_ram_write(cxt->cprz, buf, size);
+}
+
+static int ramoops_parse_dt_size(struct platform_device *pdev,
+               const char *propname, unsigned long *val)
+{
+       u64 val64;
+       int ret;
+
+       ret = of_property_read_u64(pdev->dev.of_node, propname, &val64);
+       if (ret == -EINVAL) {
+               *val = 0;
+               return 0;
+       } else if (ret != 0) {
+               dev_err(&pdev->dev, "failed to parse property %s: %d\n",
+                               propname, ret);
+               return ret;
+       }
+
+       if (val64 > ULONG_MAX) {
+               dev_err(&pdev->dev, "invalid %s %llu\n", propname, val64);
+               return -EOVERFLOW;
+       }
+
+       *val = val64;
+       return 0;
+}
+
+static int ramoops_parse_dt(struct platform_device *pdev,
+               struct ramoops_platform_data *pdata)
+{
+       struct device_node *of_node = pdev->dev.of_node;
+       struct device_node *mem_region;
+       struct resource res;
+       u32 ecc_size;
+       int ret;
+
+       dev_dbg(&pdev->dev, "using Device Tree\n");
+
+       mem_region = of_parse_phandle(of_node, "memory-region", 0);
+       if (!mem_region) {
+               dev_err(&pdev->dev, "no memory-region phandle\n");
+               return -ENODEV;
+       }
+
+       ret = of_address_to_resource(mem_region, 0, &res);
+       of_node_put(mem_region);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to translate memory-region to resource: %d\n",
+                               ret);
+               return ret;
+       }
+
+       pdata->mem_size = resource_size(&res);
+       pdata->mem_address = res.start;
+       pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
+       pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");
+
+       ret = ramoops_parse_dt_size(pdev, "record-size", &pdata->record_size);
+       if (ret < 0)
+               return ret;
+
+       ret = ramoops_parse_dt_size(pdev, "console-size", &pdata->console_size);
+       if (ret < 0)
+               return ret;
+
+       ret = ramoops_parse_dt_size(pdev, "ftrace-size", &pdata->ftrace_size);
+       if (ret < 0)
+               return ret;
+
+       ret = ramoops_parse_dt_size(pdev, "pmsg-size", &pdata->pmsg_size);
+       if (ret < 0)
+               return ret;
+
+       ret = of_property_read_u32(of_node, "ecc-size", &ecc_size);
+       if (ret == 0) {
+               if (ecc_size > INT_MAX) {
+                       dev_err(&pdev->dev, "invalid ecc-size %u\n", ecc_size);
+                       return -EOVERFLOW;
+               }
+               pdata->ecc_info.ecc_size = ecc_size;
+       } else if (ret != -EINVAL) {
+               return ret;
+       }
+
+       return 0;
+}
+
 static int ramoops_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -475,6 +587,18 @@ static int ramoops_probe(struct platform_device *pdev)
        phys_addr_t paddr;
        int err = -EINVAL;
 
+       if (dev->of_node && !pdata) {
+               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+               if (!pdata) {
+                       err = -ENOMEM;
+                       goto fail_out;
+               }
+
+               err = ramoops_parse_dt(pdev, pdata);
+               if (err < 0)
+                       goto fail_out;
+       }
+
        /* Only a single ramoops area allowed at a time, so fail extra
         * probes.
         */
@@ -603,11 +727,17 @@ static int ramoops_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id dt_match[] = {
+       { .compatible = "ramoops" },
+       {}
+};
+
 static struct platform_driver ramoops_driver = {
        .probe          = ramoops_probe,
        .remove         = ramoops_remove,
        .driver         = {
-               .name   = "ramoops",
+               .name           = "ramoops",
+               .of_match_table = dt_match,
        },
 };
 
index 364d2dffe5a61f2be66cf7bb1c8331c838660445..3975deec02f8ca9202c42662cba69f9783605158 100644 (file)
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/errno.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
+#include <linux/pstore_ram.h>
 #include <linux/rslib.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 #include <linux/vmalloc.h>
-#include <linux/pstore_ram.h>
 #include <asm/page.h>
 
 struct persistent_ram_buffer {
@@ -267,6 +268,16 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
        persistent_ram_update_ecc(prz, start, count);
 }
 
+static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
+       const void __user *s, unsigned int start, unsigned int count)
+{
+       struct persistent_ram_buffer *buffer = prz->buffer;
+       int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
+               -EFAULT : 0;
+       persistent_ram_update_ecc(prz, start, count);
+       return ret;
+}
+
 void persistent_ram_save_old(struct persistent_ram_zone *prz)
 {
        struct persistent_ram_buffer *buffer = prz->buffer;
@@ -320,6 +331,38 @@ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
        return count;
 }
 
+int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
+       const void __user *s, unsigned int count)
+{
+       int rem, ret = 0, c = count;
+       size_t start;
+
+       if (unlikely(!access_ok(VERIFY_READ, s, count)))
+               return -EFAULT;
+       if (unlikely(c > prz->buffer_size)) {
+               s += c - prz->buffer_size;
+               c = prz->buffer_size;
+       }
+
+       buffer_size_add(prz, c);
+
+       start = buffer_start_add(prz, c);
+
+       rem = prz->buffer_size - start;
+       if (unlikely(rem < c)) {
+               ret = persistent_ram_update_user(prz, s, start, rem);
+               s += rem;
+               c -= rem;
+               start = 0;
+       }
+       if (likely(!ret))
+               ret = persistent_ram_update_user(prz, s, start, c);
+
+       persistent_ram_update_header_ecc(prz);
+
+       return unlikely(ret) ? ret : count;
+}
+
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
 {
        return prz->old_log_size;
diff --git a/fs/sdcardfs/Kconfig b/fs/sdcardfs/Kconfig
new file mode 100644 (file)
index 0000000..a1c1033
--- /dev/null
@@ -0,0 +1,13 @@
+config SDCARD_FS
+       tristate "sdcard file system"
+       depends on CONFIGFS_FS
+       default n
+       help
+         Sdcardfs is based on the Wrapfs file system.
+
+config SDCARD_FS_FADV_NOACTIVE
+       bool "sdcardfs fadvise noactive support"
+       depends on FADV_NOACTIVE
+       default y
+       help
+         Sdcardfs supports fadvise noactive mode.
diff --git a/fs/sdcardfs/Makefile b/fs/sdcardfs/Makefile
new file mode 100644 (file)
index 0000000..b84fbb2
--- /dev/null
@@ -0,0 +1,7 @@
+SDCARDFS_VERSION="0.1"
+
+EXTRA_CFLAGS += -DSDCARDFS_VERSION=\"$(SDCARDFS_VERSION)\"
+
+obj-$(CONFIG_SDCARD_FS) += sdcardfs.o
+
+sdcardfs-y := dentry.o file.o inode.o main.o super.o lookup.o mmap.o packagelist.o derived_perm.o
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
new file mode 100644 (file)
index 0000000..971928a
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * fs/sdcardfs/dentry.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include "linux/ctype.h"
+
+/*
+ * returns: -ERRNO if error (returned to user)
+ *          0: tell VFS to invalidate dentry
+ *          1: dentry is valid
+ */
+static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       int err = 1;
+       struct path parent_lower_path, lower_path;
+       struct dentry *parent_dentry = NULL;
+       struct dentry *parent_lower_dentry = NULL;
+       struct dentry *lower_cur_parent_dentry = NULL;
+       struct dentry *lower_dentry = NULL;
+
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       spin_lock(&dentry->d_lock);
+       if (IS_ROOT(dentry)) {
+               spin_unlock(&dentry->d_lock);
+               return 1;
+       }
+       spin_unlock(&dentry->d_lock);
+
+       /* check for an uninitialized obb_dentry and
+        * whether the base obbpath has been changed */
+       if (is_obbpath_invalid(dentry)) {
+               d_drop(dentry);
+               return 0;
+       }
+
+       parent_dentry = dget_parent(dentry);
+       sdcardfs_get_lower_path(parent_dentry, &parent_lower_path);
+       sdcardfs_get_real_lower(dentry, &lower_path);
+       parent_lower_dentry = parent_lower_path.dentry;
+       lower_dentry = lower_path.dentry;
+       lower_cur_parent_dentry = dget_parent(lower_dentry);
+
+       spin_lock(&lower_dentry->d_lock);
+       if (d_unhashed(lower_dentry)) {
+               spin_unlock(&lower_dentry->d_lock);
+               d_drop(dentry);
+               err = 0;
+               goto out;
+       }
+       spin_unlock(&lower_dentry->d_lock);
+
+       if (parent_lower_dentry != lower_cur_parent_dentry) {
+               d_drop(dentry);
+               err = 0;
+               goto out;
+       }
+
+       if (dentry < lower_dentry) {
+               spin_lock(&dentry->d_lock);
+               spin_lock(&lower_dentry->d_lock);
+       } else {
+               spin_lock(&lower_dentry->d_lock);
+               spin_lock(&dentry->d_lock);
+       }
+
+       if (dentry->d_name.len != lower_dentry->d_name.len) {
+               __d_drop(dentry);
+               err = 0;
+       } else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
+                               dentry->d_name.len) != 0) {
+               __d_drop(dentry);
+               err = 0;
+       }
+
+       if (dentry < lower_dentry) {
+               spin_unlock(&lower_dentry->d_lock);
+               spin_unlock(&dentry->d_lock);
+       } else {
+               spin_unlock(&dentry->d_lock);
+               spin_unlock(&lower_dentry->d_lock);
+       }
+
+out:
+       dput(parent_dentry);
+       dput(lower_cur_parent_dentry);
+       sdcardfs_put_lower_path(parent_dentry, &parent_lower_path);
+       sdcardfs_put_real_lower(dentry, &lower_path);
+       return err;
+}
+
+static void sdcardfs_d_release(struct dentry *dentry)
+{
+       /* release and reset the lower paths */
+       if(has_graft_path(dentry)) {
+               sdcardfs_put_reset_orig_path(dentry);
+       }
+       sdcardfs_put_reset_lower_path(dentry);
+       free_dentry_private_data(dentry);
+       return;
+}
+
+static int sdcardfs_hash_ci(const struct dentry *dentry,
+                               struct qstr *qstr)
+{
+       /*
+        * This function is a copy of vfat_hashi.
+        * FIXME Should we support national language?
+        *       Refer to vfat_hashi()
+        * struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
+        */
+       const unsigned char *name;
+       unsigned int len;
+       unsigned long hash;
+
+       name = qstr->name;
+       //len = vfat_striptail_len(qstr);
+       len = qstr->len;
+
+       hash = init_name_hash();
+       while (len--)
+               //hash = partial_name_hash(nls_tolower(t, *name++), hash);
+               hash = partial_name_hash(tolower(*name++), hash);
+       qstr->hash = end_name_hash(hash);
+
+       return 0;
+}
+
+/*
+ * Case insensitive compare of two vfat names.
+ */
+static int sdcardfs_cmp_ci(const struct dentry *parent,
+               const struct dentry *dentry,
+               unsigned int len, const char *str, const struct qstr *name)
+{
+       /* This function is a copy of vfat_cmpi */
+       // FIXME Should we support national language?
+       //struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
+       //unsigned int alen, blen;
+
+       /* A filename cannot end in '.' or we treat it like it has none */
+       /*
+       alen = vfat_striptail_len(name);
+       blen = __vfat_striptail_len(len, str);
+       if (alen == blen) {
+               if (nls_strnicmp(t, name->name, str, alen) == 0)
+                       return 0;
+       }
+       */
+       if (name->len == len) {
+               if (strncasecmp(name->name, str, len) == 0)
+                       return 0;
+       }
+       return 1;
+}
+
+static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) {
+       sdcardfs_get_real_lower(path->dentry, actual_path);
+}
+
+const struct dentry_operations sdcardfs_ci_dops = {
+       .d_revalidate   = sdcardfs_d_revalidate,
+       .d_release      = sdcardfs_d_release,
+       .d_hash         = sdcardfs_hash_ci,
+       .d_compare      = sdcardfs_cmp_ci,
+       .d_canonical_path = sdcardfs_canonical_path,
+};
+
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
new file mode 100644 (file)
index 0000000..41e0e11
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * fs/sdcardfs/derived_perm.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/* copy derived state from parent inode */
+static void inherit_derived_state(struct inode *parent, struct inode *child)
+{
+       struct sdcardfs_inode_info *pi = SDCARDFS_I(parent);
+       struct sdcardfs_inode_info *ci = SDCARDFS_I(child);
+
+       ci->perm = PERM_INHERIT;
+       ci->userid = pi->userid;
+       ci->d_uid = pi->d_uid;
+       ci->under_android = pi->under_android;
+}
+
+/* helper function for derived state */
+void setup_derived_state(struct inode *inode, perm_t perm,
+                        userid_t userid, uid_t uid, bool under_android)
+{
+       struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
+
+       info->perm = perm;
+       info->userid = userid;
+       info->d_uid = uid;
+       info->under_android = under_android;
+}
+
+/* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry)
+{
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+       struct sdcardfs_inode_info *info = SDCARDFS_I(dentry->d_inode);
+       struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+       appid_t appid;
+
+       /* By default, each inode inherits from its parent.
+        * The properties are maintained in its private fields
+        * because the inode attributes will be modified with those of
+        * its lower inode.
+        * The derived state will be updated at the last
+        * stage of each system call by fix_derived_permission(inode).
+        */
+
+       inherit_derived_state(parent->d_inode, dentry->d_inode);
+
+       /* Derive custom permissions based on parent and current node */
+       switch (parent_info->perm) {
+               case PERM_INHERIT:
+                       /* Already inherited above */
+                       break;
+               case PERM_PRE_ROOT:
+                       /* Legacy internal layout places users at top level */
+                       info->perm = PERM_ROOT;
+                       info->userid = simple_strtoul(newdentry->d_name.name, NULL, 10);
+                       break;
+               case PERM_ROOT:
+                       /* Assume masked off by default. */
+                       if (!strcasecmp(newdentry->d_name.name, "Android")) {
+                               /* App-specific directories inside; let anyone traverse */
+                               info->perm = PERM_ANDROID;
+                               info->under_android = true;
+                       }
+                       break;
+               case PERM_ANDROID:
+                       if (!strcasecmp(newdentry->d_name.name, "data")) {
+                               /* App-specific directories inside; let anyone traverse */
+                               info->perm = PERM_ANDROID_DATA;
+                       } else if (!strcasecmp(newdentry->d_name.name, "obb")) {
+                               /* App-specific directories inside; let anyone traverse */
+                               info->perm = PERM_ANDROID_OBB;
+                               /* Single OBB directory is always shared */
+                       } else if (!strcasecmp(newdentry->d_name.name, "media")) {
+                               /* App-specific directories inside; let anyone traverse */
+                               info->perm = PERM_ANDROID_MEDIA;
+                       }
+                       break;
+               case PERM_ANDROID_DATA:
+               case PERM_ANDROID_OBB:
+               case PERM_ANDROID_MEDIA:
+                       appid = get_appid(sbi->pkgl_id, newdentry->d_name.name);
+                       if (appid != 0) {
+                               info->d_uid = multiuser_get_uid(parent_info->userid, appid);
+                       }
+                       break;
+       }
+}
+
+void get_derived_permission(struct dentry *parent, struct dentry *dentry)
+{
+       get_derived_permission_new(parent, dentry, dentry);
+}
+
+void get_derive_permissions_recursive(struct dentry *parent) {
+       struct dentry *dentry;
+       list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
+               if (dentry->d_inode) {
+                       mutex_lock(&dentry->d_inode->i_mutex);
+                       get_derived_permission(parent, dentry);
+                       fix_derived_permission(dentry->d_inode);
+                       get_derive_permissions_recursive(dentry);
+                       mutex_unlock(&dentry->d_inode->i_mutex);
+               }
+       }
+}
+
+/* main function for updating derived permission */
+inline void update_derived_permission_lock(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       if(!dentry || !dentry->d_inode) {
+               printk(KERN_ERR "sdcardfs: %s: invalid dentry\n", __func__);
+               return;
+       }
+       /* FIXME:
+        * 1. need to check whether the dentry is updated or not
+        * 2. remove the root dentry update
+        */
+       mutex_lock(&dentry->d_inode->i_mutex);
+       if(IS_ROOT(dentry)) {
+               //setup_default_pre_root_state(dentry->d_inode);
+       } else {
+               parent = dget_parent(dentry);
+               if(parent) {
+                       get_derived_permission(parent, dentry);
+                       dput(parent);
+               }
+       }
+       fix_derived_permission(dentry->d_inode);
+       mutex_unlock(&dentry->d_inode->i_mutex);
+}
+
+int need_graft_path(struct dentry *dentry)
+{
+       int ret = 0;
+       struct dentry *parent = dget_parent(dentry);
+       struct sdcardfs_inode_info *parent_info = SDCARDFS_I(parent->d_inode);
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+       if(parent_info->perm == PERM_ANDROID &&
+                       !strcasecmp(dentry->d_name.name, "obb")) {
+
+               /* /Android/obb is the base obbpath of DERIVED_UNIFIED */
+               if(!(sbi->options.multiuser == false
+                               && parent_info->userid == 0)) {
+                       ret = 1;
+               }
+       }
+       dput(parent);
+       return ret;
+}
+
+int is_obbpath_invalid(struct dentry *dent)
+{
+       int ret = 0;
+       struct sdcardfs_dentry_info *di = SDCARDFS_D(dent);
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dent->d_sb);
+       char *path_buf, *obbpath_s;
+
+       /* Check whether the base obbpath has been changed.
+        * This routine can check an uninitialized obb dentry as well.
+        * Regarding the uninitialized obb, refer to sdcardfs_mkdir(). */
+       spin_lock(&di->lock);
+       if(di->orig_path.dentry) {
+               if(!di->lower_path.dentry) {
+                       ret = 1;
+               } else {
+                       path_get(&di->lower_path);
+                       //lower_parent = lock_parent(lower_path->dentry);
+
+                       path_buf = kmalloc(PATH_MAX, GFP_ATOMIC);
+                       if(!path_buf) {
+                               ret = 1;
+                               printk(KERN_ERR "sdcardfs: failed to allocate path_buf in %s.\n", __func__);
+                       } else {
+                               obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
+                               if (d_unhashed(di->lower_path.dentry) ||
+                                       strcasecmp(sbi->obbpath_s, obbpath_s)) {
+                                       ret = 1;
+                               }
+                               kfree(path_buf);
+                       }
+
+                       //unlock_dir(lower_parent);
+                       path_put(&di->lower_path);
+               }
+       }
+       spin_unlock(&di->lock);
+       return ret;
+}
+
+int is_base_obbpath(struct dentry *dentry)
+{
+       int ret = 0;
+       struct dentry *parent = dget_parent(dentry);
+       struct sdcardfs_inode_info *parent_info = SDCARDFS_I(parent->d_inode);
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+       spin_lock(&SDCARDFS_D(dentry)->lock);
+       if (sbi->options.multiuser) {
+               if(parent_info->perm == PERM_PRE_ROOT &&
+                               !strcasecmp(dentry->d_name.name, "obb")) {
+                       ret = 1;
+               }
+       } else  if (parent_info->perm == PERM_ANDROID &&
+                       !strcasecmp(dentry->d_name.name, "obb")) {
+               ret = 1;
+       }
+       spin_unlock(&SDCARDFS_D(dentry)->lock);
+       return ret;
+}
+
+/* The lower_path will be stored to the dentry's orig_path
+ * and the base obbpath will be copied to the lower_path variable.
+ * If an error is returned, there is no change in the lower_path.
+ * Returns: -ERRNO on error (0: no error) */
+int setup_obb_dentry(struct dentry *dentry, struct path *lower_path)
+{
+       int err = 0;
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+       struct path obbpath;
+
+       /* A local obb dentry must have its own orig_path to support rmdir
+        * and mkdir of itself. Usually, we expect that the sbi->obbpath
+        * is available at this stage. */
+       sdcardfs_set_orig_path(dentry, lower_path);
+
+       err = kern_path(sbi->obbpath_s,
+                       LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &obbpath);
+
+       if(!err) {
+               /* the obbpath base has been found */
+               printk(KERN_INFO "sdcardfs: the sbi->obbpath is found\n");
+               pathcpy(lower_path, &obbpath);
+       } else {
+               /* If the sbi->obbpath is not available, we could optionally
+                * set up the lower_path with its orig_path.
+                * But the current implementation just returns an error
+                * because the sdcard daemon also regards this case as
+                * a lookup failure. */
+               printk(KERN_INFO "sdcardfs: the sbi->obbpath is not available\n");
+       }
+       return err;
+}
+
+
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
new file mode 100644 (file)
index 0000000..c249fa9
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * fs/sdcardfs/file.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+#include <linux/backing-dev.h>
+#endif
+
+static ssize_t sdcardfs_read(struct file *file, char __user *buf,
+                          size_t count, loff_t *ppos)
+{
+       int err;
+       struct file *lower_file;
+       struct dentry *dentry = file->f_path.dentry;
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+       struct backing_dev_info *bdi;
+#endif
+
+       lower_file = sdcardfs_lower_file(file);
+
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+       if (file->f_mode & FMODE_NOACTIVE) {
+               if (!(lower_file->f_mode & FMODE_NOACTIVE)) {
+                       bdi = lower_file->f_mapping->backing_dev_info;
+                       lower_file->f_ra.ra_pages = bdi->ra_pages * 2;
+                       spin_lock(&lower_file->f_lock);
+                       lower_file->f_mode |= FMODE_NOACTIVE;
+                       spin_unlock(&lower_file->f_lock);
+               }
+       }
+#endif
+
+       err = vfs_read(lower_file, buf, count, ppos);
+       /* update our inode atime upon a successful lower read */
+       if (err >= 0)
+               fsstack_copy_attr_atime(d_inode(dentry),
+                                       file_inode(lower_file));
+
+       return err;
+}
+
+static ssize_t sdcardfs_write(struct file *file, const char __user *buf,
+                           size_t count, loff_t *ppos)
+{
+       int err;
+       struct file *lower_file;
+       struct dentry *dentry = file->f_path.dentry;
+
+       /* check disk space */
+       if (!check_min_free_space(dentry, count, 0)) {
+               printk(KERN_INFO "No minimum free space.\n");
+               return -ENOSPC;
+       }
+
+       lower_file = sdcardfs_lower_file(file);
+       err = vfs_write(lower_file, buf, count, ppos);
+       /* update our inode times+sizes upon a successful lower write */
+       if (err >= 0) {
+               fsstack_copy_inode_size(d_inode(dentry),
+                                       file_inode(lower_file));
+               fsstack_copy_attr_times(d_inode(dentry),
+                                       file_inode(lower_file));
+       }
+
+       return err;
+}
+
+static int sdcardfs_readdir(struct file *file, struct dir_context *ctx)
+{
+       int err;
+       struct file *lower_file = NULL;
+       struct dentry *dentry = file->f_path.dentry;
+
+       lower_file = sdcardfs_lower_file(file);
+
+       lower_file->f_pos = file->f_pos;
+       err = iterate_dir(lower_file, ctx);
+       file->f_pos = lower_file->f_pos;
+       if (err >= 0)           /* copy the atime */
+               fsstack_copy_attr_atime(d_inode(dentry),
+                                       file_inode(lower_file));
+       return err;
+}
+
+static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       long err = -ENOTTY;
+       struct file *lower_file;
+
+       lower_file = sdcardfs_lower_file(file);
+
+       /* XXX: use vfs_ioctl if/when VFS exports it */
+       if (!lower_file || !lower_file->f_op)
+               goto out;
+       if (lower_file->f_op->unlocked_ioctl)
+               err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+
+out:
+       return err;
+}
+
+#ifdef CONFIG_COMPAT
+static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       long err = -ENOTTY;
+       struct file *lower_file;
+
+       lower_file = sdcardfs_lower_file(file);
+
+       /* XXX: use vfs_ioctl if/when VFS exports it */
+       if (!lower_file || !lower_file->f_op)
+               goto out;
+       if (lower_file->f_op->compat_ioctl)
+               err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+
+out:
+       return err;
+}
+#endif
+
+static int sdcardfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int err = 0;
+       bool willwrite;
+       struct file *lower_file;
+       const struct vm_operations_struct *saved_vm_ops = NULL;
+
+       /* this might be deferred to mmap's writepage */
+       willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
+
+       /*
+        * File systems which do not implement ->writepage may use
+        * generic_file_readonly_mmap as their ->mmap op.  If you call
+        * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
+        * But we cannot call the lower ->mmap op, so we can't tell that
+        * writeable mappings won't work.  Therefore, our only choice is to
+        * check if the lower file system supports the ->writepage, and if
+        * not, return EINVAL (the same error that
+        * generic_file_readonly_mmap returns in that case).
+        */
+       lower_file = sdcardfs_lower_file(file);
+       if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
+               err = -EINVAL;
+               printk(KERN_ERR "sdcardfs: lower file system does not "
+                      "support writeable mmap\n");
+               goto out;
+       }
+
+       /*
+        * find and save lower vm_ops.
+        *
+        * XXX: the VFS should have a cleaner way of finding the lower vm_ops
+        */
+       if (!SDCARDFS_F(file)->lower_vm_ops) {
+               err = lower_file->f_op->mmap(lower_file, vma);
+               if (err) {
+                       printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
+                       goto out;
+               }
+               saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
+               err = do_munmap(current->mm, vma->vm_start,
+                               vma->vm_end - vma->vm_start);
+               if (err) {
+                       printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
+                       goto out;
+               }
+       }
+
+       /*
+        * Next 3 lines are all I need from generic_file_mmap.  I definitely
+        * don't want its test for ->readpage which returns -ENOEXEC.
+        */
+       file_accessed(file);
+       vma->vm_ops = &sdcardfs_vm_ops;
+
+       file->f_mapping->a_ops = &sdcardfs_aops; /* set our aops */
+       if (!SDCARDFS_F(file)->lower_vm_ops) /* save for our ->fault */
+               SDCARDFS_F(file)->lower_vm_ops = saved_vm_ops;
+
+out:
+       return err;
+}
+
+static int sdcardfs_open(struct inode *inode, struct file *file)
+{
+       int err = 0;
+       struct file *lower_file = NULL;
+       struct path lower_path;
+       struct dentry *dentry = file->f_path.dentry;
+       struct dentry *parent = dget_parent(dentry);
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+       const struct cred *saved_cred = NULL;
+
+       /* don't open unhashed/deleted files */
+       if (d_unhashed(dentry)) {
+               err = -ENOENT;
+               goto out_err;
+       }
+
+       if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                         "     dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_err;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(sbi, saved_cred);
+
+       file->private_data =
+               kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
+       if (!SDCARDFS_F(file)) {
+               err = -ENOMEM;
+               goto out_revert_cred;
+       }
+
+       /* open lower object and link sdcardfs's file struct to lower's */
+       sdcardfs_get_lower_path(file->f_path.dentry, &lower_path);
+       lower_file = dentry_open(&lower_path, file->f_flags, current_cred());
+       path_put(&lower_path);
+       if (IS_ERR(lower_file)) {
+               err = PTR_ERR(lower_file);
+               lower_file = sdcardfs_lower_file(file);
+               if (lower_file) {
+                       sdcardfs_set_lower_file(file, NULL);
+                       fput(lower_file); /* fput calls dput for lower_dentry */
+               }
+       } else {
+               sdcardfs_set_lower_file(file, lower_file);
+       }
+
+       if (err)
+               kfree(SDCARDFS_F(file));
+       else {
+               sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
+       }
+
+out_revert_cred:
+       REVERT_CRED(saved_cred);
+out_err:
+       dput(parent);
+       return err;
+}
+
+static int sdcardfs_flush(struct file *file, fl_owner_t id)
+{
+       int err = 0;
+       struct file *lower_file = NULL;
+
+       lower_file = sdcardfs_lower_file(file);
+       if (lower_file && lower_file->f_op && lower_file->f_op->flush) {
+               filemap_write_and_wait(file->f_mapping);
+               err = lower_file->f_op->flush(lower_file, id);
+       }
+
+       return err;
+}
+
+/* release all lower object references & free the file info structure */
+static int sdcardfs_file_release(struct inode *inode, struct file *file)
+{
+       struct file *lower_file;
+
+       lower_file = sdcardfs_lower_file(file);
+       if (lower_file) {
+               sdcardfs_set_lower_file(file, NULL);
+               fput(lower_file);
+       }
+
+       kfree(SDCARDFS_F(file));
+       return 0;
+}
+
+static int sdcardfs_fsync(struct file *file, loff_t start, loff_t end,
+                       int datasync)
+{
+       int err;
+       struct file *lower_file;
+       struct path lower_path;
+       struct dentry *dentry = file->f_path.dentry;
+
+       err = __generic_file_fsync(file, start, end, datasync);
+       if (err)
+               goto out;
+
+       lower_file = sdcardfs_lower_file(file);
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       err = vfs_fsync_range(lower_file, start, end, datasync);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+out:
+       return err;
+}
+
+static int sdcardfs_fasync(int fd, struct file *file, int flag)
+{
+       int err = 0;
+       struct file *lower_file = NULL;
+
+       lower_file = sdcardfs_lower_file(file);
+       if (lower_file->f_op && lower_file->f_op->fasync)
+               err = lower_file->f_op->fasync(fd, lower_file, flag);
+
+       return err;
+}
+
+const struct file_operations sdcardfs_main_fops = {
+       .llseek         = generic_file_llseek,
+       .read           = sdcardfs_read,
+       .write          = sdcardfs_write,
+       .unlocked_ioctl = sdcardfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = sdcardfs_compat_ioctl,
+#endif
+       .mmap           = sdcardfs_mmap,
+       .open           = sdcardfs_open,
+       .flush          = sdcardfs_flush,
+       .release        = sdcardfs_file_release,
+       .fsync          = sdcardfs_fsync,
+       .fasync         = sdcardfs_fasync,
+};
+
+/* trimmed directory options */
+const struct file_operations sdcardfs_dir_fops = {
+       .llseek         = generic_file_llseek,
+       .read           = generic_read_dir,
+       .iterate        = sdcardfs_readdir,
+       .unlocked_ioctl = sdcardfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = sdcardfs_compat_ioctl,
+#endif
+       .open           = sdcardfs_open,
+       .release        = sdcardfs_file_release,
+       .flush          = sdcardfs_flush,
+       .fsync          = sdcardfs_fsync,
+       .fasync         = sdcardfs_fasync,
+};
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
new file mode 100644 (file)
index 0000000..2528da0
--- /dev/null
@@ -0,0 +1,802 @@
+/*
+ * fs/sdcardfs/inode.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
+const struct cred * override_fsids(struct sdcardfs_sb_info* sbi)
+{
+       struct cred * cred;
+       const struct cred * old_cred;
+
+       cred = prepare_creds();
+       if (!cred)
+               return NULL;
+
+       cred->fsuid = make_kuid(&init_user_ns, sbi->options.fs_low_uid);
+       cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
+
+       old_cred = override_creds(cred);
+
+       return old_cred;
+}
+
+/* Do not directly use this function, use REVERT_CRED() instead. */
+void revert_fsids(const struct cred * old_cred)
+{
+       const struct cred * cur_cred;
+
+       cur_cred = current->cred;
+       revert_creds(old_cred);
+       put_cred(cur_cred);
+}
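These two helpers follow the stock kernel credential-override pattern: copy the current credentials, adjust fsuid/fsgid, install the copy, and later restore the original and drop the copy. A minimal sketch of that generic pairing (error handling trimmed; the wrapper name and callback below are illustrative, not part of the patch):

    #include <linux/cred.h>
    #include <linux/errno.h>

    /* Run a callback with fsuid/fsgid temporarily overridden. */
    static int with_overridden_fsids(kuid_t uid, kgid_t gid, int (*work)(void))
    {
            const struct cred *old;
            struct cred *new;
            int ret;

            new = prepare_creds();          /* private copy of current creds */
            if (!new)
                    return -ENOMEM;
            new->fsuid = uid;
            new->fsgid = gid;

            old = override_creds(new);      /* install the copy */
            ret = work();                   /* lower filesystem operations */
            revert_creds(old);              /* restore the caller's creds */
            put_cred(new);                  /* drop our copy */

            return ret;
    }

The OVERRIDE_CRED()/REVERT_CRED() macros referenced in the comments above are presumably thin wrappers that pair override_fsids() with revert_fsids() in exactly this save-and-restore fashion.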
+
+static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
+                        umode_t mode, bool want_excl)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct dentry *lower_parent_dentry = NULL;
+       struct path lower_path;
+       const struct cred *saved_cred = NULL;
+
+       if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_eacces;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_parent_dentry = lock_parent(lower_dentry);
+
+       /* keep the file type bits and force the permission bits to 0664 */
+       mode = (mode & S_IFMT) | 00664;
+       err = vfs_create(d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
+       if (err)
+               goto out;
+
+       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, SDCARDFS_I(dir)->userid);
+       if (err)
+               goto out;
+       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+       unlock_dir(lower_parent_dentry);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       REVERT_CRED(saved_cred);
+out_eacces:
+       return err;
+}
+
+#if 0
+static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
+                      struct dentry *new_dentry)
+{
+       struct dentry *lower_old_dentry;
+       struct dentry *lower_new_dentry;
+       struct dentry *lower_dir_dentry;
+       u64 file_size_save;
+       int err;
+       struct path lower_old_path, lower_new_path;
+
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+       file_size_save = i_size_read(d_inode(old_dentry));
+       sdcardfs_get_lower_path(old_dentry, &lower_old_path);
+       sdcardfs_get_lower_path(new_dentry, &lower_new_path);
+       lower_old_dentry = lower_old_path.dentry;
+       lower_new_dentry = lower_new_path.dentry;
+       lower_dir_dentry = lock_parent(lower_new_dentry);
+
+       err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
+                      lower_new_dentry, NULL);
+       if (err || !d_inode(lower_new_dentry))
+               goto out;
+
+       err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
+       if (err)
+               goto out;
+       fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
+       fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
+       set_nlink(d_inode(old_dentry),
+                 sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
+       i_size_write(d_inode(new_dentry), file_size_save);
+out:
+       unlock_dir(lower_dir_dentry);
+       sdcardfs_put_lower_path(old_dentry, &lower_old_path);
+       sdcardfs_put_lower_path(new_dentry, &lower_new_path);
+       REVERT_CRED();
+       return err;
+}
+#endif
+
+static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct inode *lower_dir_inode = sdcardfs_lower_inode(dir);
+       struct dentry *lower_dir_dentry;
+       struct path lower_path;
+       const struct cred *saved_cred = NULL;
+
+       if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_eacces;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       dget(lower_dentry);
+       lower_dir_dentry = lock_parent(lower_dentry);
+
+       err = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+
+       /*
+        * Note: unlinking on top of NFS can cause silly-renamed files.
+        * Trying to delete such files results in EBUSY from NFS
+        * below.  Silly-renamed files will get deleted by NFS later on, so
+        * we just need to detect them here and treat such EBUSY errors as
+        * if the upper file was successfully deleted.
+        */
+       if (err == -EBUSY && lower_dentry->d_flags & DCACHE_NFSFS_RENAMED)
+               err = 0;
+       if (err)
+               goto out;
+       fsstack_copy_attr_times(dir, lower_dir_inode);
+       fsstack_copy_inode_size(dir, lower_dir_inode);
+       set_nlink(d_inode(dentry),
+                 sdcardfs_lower_inode(d_inode(dentry))->i_nlink);
+       d_inode(dentry)->i_ctime = dir->i_ctime;
+       d_drop(dentry); /* this is needed, else LTP fails (VFS won't do it) */
+out:
+       unlock_dir(lower_dir_dentry);
+       dput(lower_dentry);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       REVERT_CRED(saved_cred);
+out_eacces:
+       return err;
+}
+
+#if 0
+static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
+                         const char *symname)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct dentry *lower_parent_dentry = NULL;
+       struct path lower_path;
+
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_parent_dentry = lock_parent(lower_dentry);
+
+       err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
+       if (err)
+               goto out;
+       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
+       if (err)
+               goto out;
+       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+       unlock_dir(lower_parent_dentry);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       REVERT_CRED();
+       return err;
+}
+#endif
+
+static int touch(char *abs_path, mode_t mode) {
+       struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
+       if (IS_ERR(filp)) {
+               if (PTR_ERR(filp) == -EEXIST) {
+                       return 0;
+               }
+               else {
+                       printk(KERN_ERR "sdcardfs: failed to open(%s): %ld\n",
+                                               abs_path, PTR_ERR(filp));
+                       return PTR_ERR(filp);
+               }
+       }
+       filp_close(filp, current->files);
+       return 0;
+}
+
+static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       int err;
+       int make_nomedia_in_obb = 0;
+       struct dentry *lower_dentry;
+       struct dentry *lower_parent_dentry = NULL;
+       struct path lower_path;
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+       const struct cred *saved_cred = NULL;
+       struct sdcardfs_inode_info *pi = SDCARDFS_I(dir);
+       char *page_buf;
+       char *nomedia_dir_name;
+       char *nomedia_fullpath;
+       int fullpath_namelen;
+       int touch_err = 0;
+
+       if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_eacces;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+       /* check disk space */
+       if (!check_min_free_space(dentry, 0, 1)) {
+               printk(KERN_INFO "sdcardfs: No minimum free space.\n");
+               err = -ENOSPC;
+               goto out_revert;
+       }
+
+       /* the lower_dentry is negative here */
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_parent_dentry = lock_parent(lower_dentry);
+
+       /* keep the file type bits and force the permission bits to 0775 */
+       mode = (mode & S_IFMT) | 00775;
+       err = vfs_mkdir(d_inode(lower_parent_dentry), lower_dentry, mode);
+
+       if (err)
+               goto out;
+
+       /* if it is a local obb dentry, setup it with the base obbpath */
+       if(need_graft_path(dentry)) {
+
+               err = setup_obb_dentry(dentry, &lower_path);
+               if(err) {
+                       /* If the sbi->obbpath is not available, the lower_path won't be
+                        * changed by setup_obb_dentry(), but the lower path is saved to
+                        * its orig_path. This dentry will be revalidated later.
+                        * For now, the lower_path should be NULL. */
+                       sdcardfs_put_reset_lower_path(dentry);
+
+                       /* The newly created lower path was saved to its orig_path, or
+                        * the lower_path is the base obbpath.
+                        * Therefore, an additional path_get is required. */
+                       path_get(&lower_path);
+               } else
+                       make_nomedia_in_obb = 1;
+       }
+
+       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pi->userid);
+       if (err)
+               goto out;
+
+       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+       /* update number of links on parent directory */
+       set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);
+
+       if ((!sbi->options.multiuser) && (!strcasecmp(dentry->d_name.name, "obb"))
+               && (pi->perm == PERM_ANDROID) && (pi->userid == 0))
+               make_nomedia_in_obb = 1;
+
+       /* When creating /Android/data and /Android/obb, mark them as .nomedia */
+       if (make_nomedia_in_obb ||
+               ((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {
+
+               page_buf = (char *)__get_free_page(GFP_KERNEL);
+               if (!page_buf) {
+                       printk(KERN_ERR "sdcardfs: failed to allocate page buf\n");
+                       goto out;
+               }
+
+               nomedia_dir_name = d_absolute_path(&lower_path, page_buf, PAGE_SIZE);
+               if (IS_ERR(nomedia_dir_name)) {
+                       free_page((unsigned long)page_buf);
+                       printk(KERN_ERR "sdcardfs: failed to get .nomedia dir name\n");
+                       goto out;
+               }
+
+               fullpath_namelen = page_buf + PAGE_SIZE - nomedia_dir_name - 1;
+               fullpath_namelen += strlen("/.nomedia");
+               nomedia_fullpath = kzalloc(fullpath_namelen + 1, GFP_KERNEL);
+               if (!nomedia_fullpath) {
+                       free_page((unsigned long)page_buf);
+                       printk(KERN_ERR "sdcardfs: failed to allocate .nomedia fullpath buf\n");
+                       goto out;
+               }
+
+               strcpy(nomedia_fullpath, nomedia_dir_name);
+               free_page((unsigned long)page_buf);
+               strcat(nomedia_fullpath, "/.nomedia");
+               touch_err = touch(nomedia_fullpath, 0664);
+               if (touch_err) {
+                       printk(KERN_ERR "sdcardfs: failed to touch(%s): %d\n",
+                                                       nomedia_fullpath, touch_err);
+                       kfree(nomedia_fullpath);
+                       goto out;
+               }
+               kfree(nomedia_fullpath);
+       }
+out:
+       unlock_dir(lower_parent_dentry);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+out_revert:
+       REVERT_CRED(saved_cred);
+out_eacces:
+       return err;
+}
+
+static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+       struct dentry *lower_dentry;
+       struct dentry *lower_dir_dentry;
+       int err;
+       struct path lower_path;
+       const struct cred *saved_cred = NULL;
+
+       if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_eacces;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+       /* sdcardfs_get_real_lower(): in case of removing a user's obb dentry,
+        * the dentry on the original path should be deleted. */
+       sdcardfs_get_real_lower(dentry, &lower_path);
+
+       lower_dentry = lower_path.dentry;
+       lower_dir_dentry = lock_parent(lower_dentry);
+
+       err = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
+       if (err)
+               goto out;
+
+       d_drop(dentry); /* drop our dentry on success (why not VFS's job?) */
+       if (d_inode(dentry))
+               clear_nlink(d_inode(dentry));
+       fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+       fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
+       set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
+
+out:
+       unlock_dir(lower_dir_dentry);
+       sdcardfs_put_real_lower(dentry, &lower_path);
+       REVERT_CRED(saved_cred);
+out_eacces:
+       return err;
+}
+
+#if 0
+static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+                       dev_t dev)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct dentry *lower_parent_dentry = NULL;
+       struct path lower_path;
+
+       OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_parent_dentry = lock_parent(lower_dentry);
+
+       err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
+       if (err)
+               goto out;
+
+       err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
+       if (err)
+               goto out;
+       fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+       fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+       unlock_dir(lower_parent_dentry);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       REVERT_CRED();
+       return err;
+}
+#endif
+
+/*
+ * The locking rules in sdcardfs_rename are complex.  We could use a simpler
+ * superblock-level name-space lock for renames and copy-ups.
+ */
+static int sdcardfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+                        struct inode *new_dir, struct dentry *new_dentry)
+{
+       int err = 0;
+       struct dentry *lower_old_dentry = NULL;
+       struct dentry *lower_new_dentry = NULL;
+       struct dentry *lower_old_dir_dentry = NULL;
+       struct dentry *lower_new_dir_dentry = NULL;
+       struct dentry *trap = NULL;
+       struct dentry *new_parent = NULL;
+       struct path lower_old_path, lower_new_path;
+       const struct cred *saved_cred = NULL;
+
+       if(!check_caller_access_to_name(old_dir, old_dentry->d_name.name) ||
+               !check_caller_access_to_name(new_dir, new_dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  new_dentry: %s, task:%s\n",
+                                                __func__, new_dentry->d_name.name, current->comm);
+               err = -EACCES;
+               goto out_eacces;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred);
+
+       sdcardfs_get_real_lower(old_dentry, &lower_old_path);
+       sdcardfs_get_lower_path(new_dentry, &lower_new_path);
+       lower_old_dentry = lower_old_path.dentry;
+       lower_new_dentry = lower_new_path.dentry;
+       lower_old_dir_dentry = dget_parent(lower_old_dentry);
+       lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
+       trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+       /* source should not be ancestor of target */
+       if (trap == lower_old_dentry) {
+               err = -EINVAL;
+               goto out;
+       }
+       /* target should not be ancestor of source */
+       if (trap == lower_new_dentry) {
+               err = -ENOTEMPTY;
+               goto out;
+       }
+
+       err = vfs_rename(d_inode(lower_old_dir_dentry), lower_old_dentry,
+                        d_inode(lower_new_dir_dentry), lower_new_dentry,
+                        NULL, 0);
+       if (err)
+               goto out;
+
+       /* Copy attrs from the lower dir, except i_uid/i_gid */
+       sdcardfs_copy_and_fix_attrs(new_dir, d_inode(lower_new_dir_dentry));
+       fsstack_copy_inode_size(new_dir, d_inode(lower_new_dir_dentry));
+
+       if (new_dir != old_dir) {
+               sdcardfs_copy_and_fix_attrs(old_dir, d_inode(lower_old_dir_dentry));
+               fsstack_copy_inode_size(old_dir, d_inode(lower_old_dir_dentry));
+
+               /* update the derived permission of the old_dentry
+                * with its new parent
+                */
+               new_parent = dget_parent(new_dentry);
+               if(new_parent) {
+                       if(d_inode(old_dentry)) {
+                               update_derived_permission_lock(old_dentry);
+                       }
+                       dput(new_parent);
+               }
+       }
+       /* At this point, not all dentry information has been moved, so
+        * we pass along new_dentry for the name.*/
+       mutex_lock(&d_inode(old_dentry)->i_mutex);
+       get_derived_permission_new(new_dentry->d_parent, old_dentry, new_dentry);
+       fix_derived_permission(d_inode(old_dentry));
+       get_derive_permissions_recursive(old_dentry);
+       mutex_unlock(&d_inode(old_dentry)->i_mutex);
+out:
+       unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+       dput(lower_old_dir_dentry);
+       dput(lower_new_dir_dentry);
+       sdcardfs_put_real_lower(old_dentry, &lower_old_path);
+       sdcardfs_put_lower_path(new_dentry, &lower_new_path);
+       REVERT_CRED(saved_cred);
+out_eacces:
+       return err;
+}
+
+#if 0
+static int sdcardfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct path lower_path;
+       /* XXX readlink does not require overriding credentials */
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       if (!d_inode(lower_dentry)->i_op ||
+           !d_inode(lower_dentry)->i_op->readlink) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = d_inode(lower_dentry)->i_op->readlink(lower_dentry,
+                                                   buf, bufsiz);
+       if (err < 0)
+               goto out;
+       fsstack_copy_attr_atime(d_inode(dentry), d_inode(lower_dentry));
+
+out:
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       return err;
+}
+#endif
+
+#if 0
+static const char *sdcardfs_follow_link(struct dentry *dentry, void **cookie)
+{
+       char *buf;
+       int len = PAGE_SIZE, err;
+       mm_segment_t old_fs;
+
+       /* This is freed by the put_link method assuming a successful call. */
+       buf = kmalloc(len, GFP_KERNEL);
+       if (!buf) {
+               buf = ERR_PTR(-ENOMEM);
+               return buf;
+       }
+
+       /* read the symlink, and then we will follow it */
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       err = sdcardfs_readlink(dentry, buf, len);
+       set_fs(old_fs);
+       if (err < 0) {
+               kfree(buf);
+               buf = ERR_PTR(err);
+       } else {
+               buf[err] = '\0';
+       }
+       return *cookie = buf;
+}
+#endif
+
+static int sdcardfs_permission(struct inode *inode, int mask)
+{
+       int err;
+
+       /*
+        * Permission check on sdcardfs inode.
+        * Calling process should have AID_SDCARD_RW permission
+        */
+       err = generic_permission(inode, mask);
+
+       /* XXX
+        * The original sdcardfs code calls inode_permission(lower_inode, ...)
+        * to check the lower inode permission. But doing that here seems to be
+        * duplicated work, because the functions called after this one,
+        * such as vfs_create, vfs_unlink and vfs_rename,
+        * do exactly the same thing, i.e. they call inode_permission().
+        * So we just let them do it.
+        * If there is any security hole, just uncomment the following if block.
+        */
+#if 0
+       if (!err) {
+               /*
+                * Permission check on lower_inode(=EXT4).
+                * we check it with AID_MEDIA_RW permission
+                */
+               struct inode *lower_inode;
+               OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
+
+               lower_inode = sdcardfs_lower_inode(inode);
+               err = inode_permission(lower_inode, mask);
+
+               REVERT_CRED();
+       }
+#endif
+       return err;
+
+}
+
+static int sdcardfs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+       int err;
+       struct dentry *lower_dentry;
+       struct inode *inode;
+       struct inode *lower_inode;
+       struct path lower_path;
+       struct iattr lower_ia;
+       struct dentry *parent;
+
+       inode = d_inode(dentry);
+
+       /*
+        * Check if user has permission to change inode.  We don't check if
+        * this user can change the lower inode: that should happen when
+        * calling notify_change on the lower inode.
+        */
+       err = inode_change_ok(inode, ia);
+
+       /* No vfs_XXX operations required, cred overriding will be skipped. wj */
+       if (!err) {
+               /* check the Android group ID */
+               parent = dget_parent(dentry);
+               if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+                       printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                        "  dentry: %s, task:%s\n",
+                                                        __func__, dentry->d_name.name, current->comm);
+                       err = -EACCES;
+               }
+               dput(parent);
+       }
+
+       if (err)
+               goto out_err;
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_inode = sdcardfs_lower_inode(inode);
+
+       /* prepare our own lower struct iattr (with the lower file) */
+       memcpy(&lower_ia, ia, sizeof(lower_ia));
+       if (ia->ia_valid & ATTR_FILE)
+               lower_ia.ia_file = sdcardfs_lower_file(ia->ia_file);
+
+       lower_ia.ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
+
+       /*
+        * If shrinking, first truncate upper level to cancel writing dirty
+        * pages beyond the new eof; and also if its maxbytes is more
+        * limiting (fail with -EFBIG before making any change to the lower
+        * level).  There is no need to vmtruncate the upper level
+        * afterwards in the other cases: we fsstack_copy_inode_size from
+        * the lower level.
+        */
+       if (current->mm)
+               down_write(&current->mm->mmap_sem);
+       if (ia->ia_valid & ATTR_SIZE) {
+               err = inode_newsize_ok(inode, ia->ia_size);
+               if (err) {
+                       if (current->mm)
+                               up_write(&current->mm->mmap_sem);
+                       goto out;
+               }
+               truncate_setsize(inode, ia->ia_size);
+       }
+
+       /*
+        * mode change is for clearing setuid/setgid bits. Allow lower fs
+        * to interpret this in its own way.
+        */
+       if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+               lower_ia.ia_valid &= ~ATTR_MODE;
+
+       /* notify the (possibly copied-up) lower inode */
+       /*
+        * Note: we use d_inode(lower_dentry), because lower_inode may be
+        * unlinked (no inode->i_sb and i_ino==0).  This happens if someone
+        * tries to open(), unlink(), then ftruncate() a file.
+        */
+       mutex_lock(&d_inode(lower_dentry)->i_mutex);
+       err = notify_change(lower_dentry, &lower_ia, /* note: lower_ia */
+                       NULL);
+       mutex_unlock(&d_inode(lower_dentry)->i_mutex);
+       if (current->mm)
+               up_write(&current->mm->mmap_sem);
+       if (err)
+               goto out;
+
+       /* get attributes from the lower inode and update derived permissions */
+       sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+
+       /*
+        * Not running fsstack_copy_inode_size(inode, lower_inode), because
+        * VFS should update our inode size, and notify_change on
+        * lower_inode should update its size.
+        */
+
+out:
+       sdcardfs_put_lower_path(dentry, &lower_path);
+out_err:
+       return err;
+}
+
+static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+                struct kstat *stat)
+{
+       struct dentry *lower_dentry;
+       struct inode *inode;
+       struct inode *lower_inode;
+       struct path lower_path;
+       struct dentry *parent;
+
+       parent = dget_parent(dentry);
+       if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                                                "  dentry: %s, task:%s\n",
+                                                __func__, dentry->d_name.name, current->comm);
+               dput(parent);
+               return -EACCES;
+       }
+       dput(parent);
+
+       inode = d_inode(dentry);
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       lower_dentry = lower_path.dentry;
+       lower_inode = sdcardfs_lower_inode(inode);
+
+
+       sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+       fsstack_copy_inode_size(inode, lower_inode);
+
+
+       generic_fillattr(inode, stat);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+       return 0;
+}
+
+const struct inode_operations sdcardfs_symlink_iops = {
+       .permission     = sdcardfs_permission,
+       .setattr        = sdcardfs_setattr,
+       /* XXX The following operations are implemented,
+        *     but FUSE (sdcard) or FAT does not support them.
+        *     These methods are *NOT* fully tested.
+       .readlink       = sdcardfs_readlink,
+       .follow_link    = sdcardfs_follow_link,
+       .put_link       = kfree_put_link,
+        */
+};
+
+const struct inode_operations sdcardfs_dir_iops = {
+       .create         = sdcardfs_create,
+       .lookup         = sdcardfs_lookup,
+#if 0
+       .permission     = sdcardfs_permission,
+#endif
+       .unlink         = sdcardfs_unlink,
+       .mkdir          = sdcardfs_mkdir,
+       .rmdir          = sdcardfs_rmdir,
+       .rename         = sdcardfs_rename,
+       .setattr        = sdcardfs_setattr,
+       .getattr        = sdcardfs_getattr,
+       /* XXX The following operations are implemented,
+        *     but FUSE (sdcard) or FAT does not support them.
+        *     These methods are *NOT* fully tested.
+       .symlink        = sdcardfs_symlink,
+       .link           = sdcardfs_link,
+       .mknod          = sdcardfs_mknod,
+        */
+};
+
+const struct inode_operations sdcardfs_main_iops = {
+       .permission     = sdcardfs_permission,
+       .setattr        = sdcardfs_setattr,
+       .getattr        = sdcardfs_getattr,
+};
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
new file mode 100644 (file)
index 0000000..a01b06a
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+ * fs/sdcardfs/lookup.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include "linux/delay.h"
+
+/* The dentry cache is just so we have properly sized dentries */
+static struct kmem_cache *sdcardfs_dentry_cachep;
+
+int sdcardfs_init_dentry_cache(void)
+{
+       sdcardfs_dentry_cachep =
+               kmem_cache_create("sdcardfs_dentry",
+                                 sizeof(struct sdcardfs_dentry_info),
+                                 0, SLAB_RECLAIM_ACCOUNT, NULL);
+
+       return sdcardfs_dentry_cachep ? 0 : -ENOMEM;
+}
+
+void sdcardfs_destroy_dentry_cache(void)
+{
+       if (sdcardfs_dentry_cachep)
+               kmem_cache_destroy(sdcardfs_dentry_cachep);
+}
+
+void free_dentry_private_data(struct dentry *dentry)
+{
+       if (!dentry || !dentry->d_fsdata)
+               return;
+       kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
+       dentry->d_fsdata = NULL;
+}
+
+/* allocate new dentry private data */
+int new_dentry_private_data(struct dentry *dentry)
+{
+       struct sdcardfs_dentry_info *info = SDCARDFS_D(dentry);
+
+       /* use zalloc to init dentry_info.lower_path */
+       info = kmem_cache_zalloc(sdcardfs_dentry_cachep, GFP_ATOMIC);
+       if (!info)
+               return -ENOMEM;
+
+       spin_lock_init(&info->lock);
+       dentry->d_fsdata = info;
+
+       return 0;
+}
+
+struct inode_data {
+       struct inode *lower_inode;
+       userid_t id;
+};
+
+static int sdcardfs_inode_test(struct inode *inode, void *candidate_data)
+{
+       struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
+       userid_t current_userid = SDCARDFS_I(inode)->userid;
+       if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
+                       current_userid == ((struct inode_data *)candidate_data)->id)
+               return 1; /* found a match */
+       else
+               return 0; /* no match */
+}
+
+static int sdcardfs_inode_set(struct inode *inode, void *lower_inode)
+{
+       /* we do actual inode initialization in sdcardfs_iget */
+       return 0;
+}
+
+struct inode *sdcardfs_iget(struct super_block *sb, struct inode *lower_inode, userid_t id)
+{
+       struct sdcardfs_inode_info *info;
+       struct inode_data data;
+       struct inode *inode; /* the new inode to return */
+       int err;
+
+       data.id = id;
+       data.lower_inode = lower_inode;
+       inode = iget5_locked(sb, /* our superblock */
+                            /*
+                             * hashval: we use inode number, but we can
+                             * also use "(unsigned long)lower_inode"
+                             * instead.
+                             */
+                            lower_inode->i_ino, /* hashval */
+                            sdcardfs_inode_test,       /* inode comparison function */
+                            sdcardfs_inode_set, /* inode init function */
+                            &data); /* data passed to test+set fxns */
+       if (!inode) {
+               err = -EACCES;
+               iput(lower_inode);
+               return ERR_PTR(err);
+       }
+       /* if found a cached inode, then just return it */
+       if (!(inode->i_state & I_NEW))
+               return inode;
+
+       /* initialize new inode */
+       info = SDCARDFS_I(inode);
+
+       inode->i_ino = lower_inode->i_ino;
+       if (!igrab(lower_inode)) {
+               err = -ESTALE;
+               return ERR_PTR(err);
+       }
+       sdcardfs_set_lower_inode(inode, lower_inode);
+
+       inode->i_version++;
+
+       /* use different set of inode ops for symlinks & directories */
+       if (S_ISDIR(lower_inode->i_mode))
+               inode->i_op = &sdcardfs_dir_iops;
+       else if (S_ISLNK(lower_inode->i_mode))
+               inode->i_op = &sdcardfs_symlink_iops;
+       else
+               inode->i_op = &sdcardfs_main_iops;
+
+       /* use different set of file ops for directories */
+       if (S_ISDIR(lower_inode->i_mode))
+               inode->i_fop = &sdcardfs_dir_fops;
+       else
+               inode->i_fop = &sdcardfs_main_fops;
+
+       inode->i_mapping->a_ops = &sdcardfs_aops;
+
+       inode->i_atime.tv_sec = 0;
+       inode->i_atime.tv_nsec = 0;
+       inode->i_mtime.tv_sec = 0;
+       inode->i_mtime.tv_nsec = 0;
+       inode->i_ctime.tv_sec = 0;
+       inode->i_ctime.tv_nsec = 0;
+
+       /* properly initialize special inodes */
+       if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
+           S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
+               init_special_inode(inode, lower_inode->i_mode,
+                                  lower_inode->i_rdev);
+
+       /* all well, copy inode attributes */
+       sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+       fsstack_copy_inode_size(inode, lower_inode);
+
+       unlock_new_inode(inode);
+       return inode;
+}
+
+/*
+ * Connect an sdcardfs dentry/inode with the corresponding lower ones.  This is
+ * the classic stackable file system "vnode interposition" action.
+ *
+ * @dentry: sdcardfs's dentry which interposes on lower one
+ * @sb: sdcardfs's super_block
+ * @lower_path: the lower path (caller does path_get/put)
+ */
+int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+                    struct path *lower_path, userid_t id)
+{
+       int err = 0;
+       struct inode *inode;
+       struct inode *lower_inode;
+       struct super_block *lower_sb;
+
+       lower_inode = lower_path->dentry->d_inode;
+       lower_sb = sdcardfs_lower_super(sb);
+
+       /* check that the lower file system didn't cross a mount point */
+       if (lower_inode->i_sb != lower_sb) {
+               err = -EXDEV;
+               goto out;
+       }
+
+       /*
+        * We allocate our new inode below by calling sdcardfs_iget,
+        * which will initialize some of the new inode's fields
+        */
+
+       /* inherit lower inode number for sdcardfs's inode */
+       inode = sdcardfs_iget(sb, lower_inode, id);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               goto out;
+       }
+
+       d_add(dentry, inode);
+       update_derived_permission_lock(dentry);
+out:
+       return err;
+}
+
+/*
+ * Main driver function for sdcardfs's lookup.
+ *
+ * Returns: NULL (ok), ERR_PTR if an error occurred.
+ * Fills in lower_parent_path with <dentry,mnt> on success.
+ */
+static struct dentry *__sdcardfs_lookup(struct dentry *dentry,
+               unsigned int flags, struct path *lower_parent_path, userid_t id)
+{
+       int err = 0;
+       struct vfsmount *lower_dir_mnt;
+       struct dentry *lower_dir_dentry = NULL;
+       struct dentry *lower_dentry;
+       const char *name;
+       struct path lower_path;
+       struct qstr this;
+       struct sdcardfs_sb_info *sbi;
+
+       sbi = SDCARDFS_SB(dentry->d_sb);
+       /* must initialize dentry operations */
+       d_set_d_op(dentry, &sdcardfs_ci_dops);
+
+       if (IS_ROOT(dentry))
+               goto out;
+
+       name = dentry->d_name.name;
+
+       /* now start the actual lookup procedure */
+       lower_dir_dentry = lower_parent_path->dentry;
+       lower_dir_mnt = lower_parent_path->mnt;
+
+       /* Use vfs_path_lookup to check if the dentry exists or not */
+       err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0,
+                               &lower_path);
+
+       /* no error: handle positive dentries */
+       if (!err) {
+               /* Check if the dentry is an obb dentry;
+                * if so, the lower_inode must be replaced with
+                * the inode of the graft path. */
+
+               if(need_graft_path(dentry)) {
+
+                       /* setup_obb_dentry()
+                        * The lower_path will be stored to the dentry's orig_path
+                        * and the base obbpath will be copied to the lower_path variable.
+                        * If an error is returned, there is no change in the lower_path.
+                        *              Returns: -ERRNO on error (0: no error) */
+                       err = setup_obb_dentry(dentry, &lower_path);
+
+                       if (err) {
+                               /* if the sbi->obbpath is not available, we could optionally
+                                * set up the lower_path with its orig_path. But the current
+                                * implementation just returns an error because the sdcard
+                                * daemon also regards this case as a lookup failure. */
+                               printk(KERN_INFO "sdcardfs: base obbpath is not available\n");
+                               sdcardfs_put_reset_orig_path(dentry);
+                               goto out;
+                       }
+               }
+
+               sdcardfs_set_lower_path(dentry, &lower_path);
+               err = sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
+               if (err) /* path_put underlying path on error */
+                       sdcardfs_put_reset_lower_path(dentry);
+               goto out;
+       }
+
+       /*
+        * We don't consider ENOENT an error, and we want to return a
+        * negative dentry.
+        */
+       if (err && err != -ENOENT)
+               goto out;
+
+       /* instantiate a new negative dentry */
+       this.name = name;
+       this.len = strlen(name);
+       this.hash = full_name_hash(this.name, this.len);
+       lower_dentry = d_lookup(lower_dir_dentry, &this);
+       if (lower_dentry)
+               goto setup_lower;
+
+       lower_dentry = d_alloc(lower_dir_dentry, &this);
+       if (!lower_dentry) {
+               err = -ENOMEM;
+               goto out;
+       }
+       d_add(lower_dentry, NULL); /* instantiate and hash */
+
+setup_lower:
+       lower_path.dentry = lower_dentry;
+       lower_path.mnt = mntget(lower_dir_mnt);
+       sdcardfs_set_lower_path(dentry, &lower_path);
+
+       /*
+        * If the intent is to create a file, then don't return an error, so
+        * the VFS will continue the process of making this negative dentry
+        * into a positive one.
+        */
+       if (flags & (LOOKUP_CREATE|LOOKUP_RENAME_TARGET))
+               err = 0;
+
+out:
+       return ERR_PTR(err);
+}
+
+/*
+ * On success:
+ *     fills the dentry object with appropriate values and returns NULL.
+ * On failure (== error)
+ *     returns an error pointer
+ *
+ * @dir : Parent inode. It is locked (dir->i_mutex)
+ * @dentry : Target dentry to look up. We should set each of its fields.
+ *          (dentry->d_name is initialized already)
+ * @flags : lookup flags
+ */
+struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
+                            unsigned int flags)
+{
+       struct dentry *ret = NULL, *parent;
+       struct path lower_parent_path;
+       int err = 0;
+       const struct cred *saved_cred = NULL;
+
+       parent = dget_parent(dentry);
+
+       if (!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
+               ret = ERR_PTR(-EACCES);
+               printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                      "     dentry: %s, task:%s\n",
+                      __func__, dentry->d_name.name, current->comm);
+               goto out_err;
+       }
+
+       /* save current_cred and override it */
+       OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+       sdcardfs_get_lower_path(parent, &lower_parent_path);
+
+       /* allocate dentry private data.  We free it in ->d_release */
+       err = new_dentry_private_data(dentry);
+       if (err) {
+               ret = ERR_PTR(err);
+               goto out;
+       }
+
+       ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
+       if (IS_ERR(ret))
+               goto out;
+       if (ret)
+               dentry = ret;
+       if (dentry->d_inode) {
+               fsstack_copy_attr_times(dentry->d_inode,
+                                       sdcardfs_lower_inode(dentry->d_inode));
+               /* get derived permission */
+               mutex_lock(&dentry->d_inode->i_mutex);
+               get_derived_permission(parent, dentry);
+               fix_derived_permission(dentry->d_inode);
+               mutex_unlock(&dentry->d_inode->i_mutex);
+       }
+       /* update parent directory's atime */
+       fsstack_copy_attr_atime(parent->d_inode,
+                               sdcardfs_lower_inode(parent->d_inode));
+
+out:
+       sdcardfs_put_lower_path(parent, &lower_parent_path);
+       REVERT_CRED(saved_cred);
+out_err:
+       dput(parent);
+       return ret;
+}
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
new file mode 100644 (file)
index 0000000..a652228
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * fs/sdcardfs/main.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/parser.h>
+
+enum {
+       Opt_fsuid,
+       Opt_fsgid,
+       Opt_gid,
+       Opt_debug,
+       Opt_lower_fs,
+       Opt_mask,
+       Opt_multiuser,  /* needed? */
+       Opt_userid,
+       Opt_reserved_mb,
+       Opt_err,
+};
+
+static const match_table_t sdcardfs_tokens = {
+       {Opt_fsuid, "fsuid=%u"},
+       {Opt_fsgid, "fsgid=%u"},
+       {Opt_gid, "gid=%u"},
+       {Opt_debug, "debug"},
+       {Opt_mask, "mask=%u"},
+       {Opt_userid, "userid=%d"},
+       {Opt_multiuser, "multiuser"},
+       {Opt_reserved_mb, "reserved_mb=%u"},
+       {Opt_err, NULL}
+};
+
+static int parse_options(struct super_block *sb, char *options, int silent,
+                               int *debug, struct sdcardfs_mount_options *opts)
+{
+       char *p;
+       substring_t args[MAX_OPT_ARGS];
+       int option;
+
+       /* by default, we use AID_MEDIA_RW as uid, gid */
+       opts->fs_low_uid = AID_MEDIA_RW;
+       opts->fs_low_gid = AID_MEDIA_RW;
+       opts->mask = 0;
+       opts->multiuser = false;
+       opts->fs_user_id = 0;
+       opts->gid = 0;
+       /* by default, 0MB is reserved */
+       opts->reserved_mb = 0;
+
+       *debug = 0;
+
+       if (!options)
+               return 0;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+               if (!*p)
+                       continue;
+
+               token = match_token(p, sdcardfs_tokens, args);
+
+               switch (token) {
+               case Opt_debug:
+                       *debug = 1;
+                       break;
+               case Opt_fsuid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->fs_low_uid = option;
+                       break;
+               case Opt_fsgid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->fs_low_gid = option;
+                       break;
+               case Opt_gid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->gid = option;
+                       break;
+               case Opt_userid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->fs_user_id = option;
+                       break;
+               case Opt_mask:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->mask = option;
+                       break;
+               case Opt_multiuser:
+                       opts->multiuser = true;
+                       break;
+               case Opt_reserved_mb:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       opts->reserved_mb = option;
+                       break;
+               /* unknown option */
+               default:
+                       if (!silent) {
+                               printk(KERN_ERR "Unrecognized mount option \"%s\" or missing value\n", p);
+                       }
+                       return -EINVAL;
+               }
+       }
+
+       if (*debug) {
+               printk(KERN_INFO "sdcardfs : options - debug:%d\n", *debug);
+               printk(KERN_INFO "sdcardfs : options - uid:%d\n",
+                                                       opts->fs_low_uid);
+               printk(KERN_INFO "sdcardfs : options - gid:%d\n",
+                                                       opts->fs_low_gid);
+       }
+
+       return 0;
+}
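For reference, the token walk performed by parse_options() above can be reproduced in plain userspace C. This is only an illustrative sketch: the sample option string and the printf reporting are made up, and only the option names mirror the sdcardfs_tokens table.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char opts[] = "fsuid=1023,fsgid=1023,gid=1015,mask=6,multiuser";
	char *rest = opts, *p;

	while ((p = strsep(&rest, ",")) != NULL) {
		unsigned int val;

		if (!*p)
			continue;               /* skip empty tokens, as parse_options() does */
		if (sscanf(p, "fsuid=%u", &val) == 1)
			printf("fs_low_uid = %u\n", val);
		else if (sscanf(p, "fsgid=%u", &val) == 1)
			printf("fs_low_gid = %u\n", val);
		else if (sscanf(p, "gid=%u", &val) == 1)
			printf("gid = %u\n", val);
		else if (sscanf(p, "mask=%u", &val) == 1)
			printf("mask = %u\n", val);
		else if (strcmp(p, "multiuser") == 0)
			printf("multiuser = true\n");
		else
			printf("unrecognized option \"%s\"\n", p);
	}
	return 0;
}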
+
+#if 0
+/*
+ * our custom d_alloc_root work-alike
+ *
+ * we can't use d_alloc_root if we want to use our own interpose function
+ * unchanged, so we simply call our own "fake" d_alloc_root
+ */
+static struct dentry *sdcardfs_d_alloc_root(struct super_block *sb)
+{
+       struct dentry *ret = NULL;
+
+       if (sb) {
+               static const struct qstr name = {
+                       .name = "/",
+                       .len = 1
+               };
+
+               ret = d_alloc(NULL, &name);
+               if (ret) {
+                       d_set_d_op(ret, &sdcardfs_ci_dops);
+                       ret->d_sb = sb;
+                       ret->d_parent = ret;
+               }
+       }
+       return ret;
+}
+#endif
+
+DEFINE_MUTEX(sdcardfs_super_list_lock);
+LIST_HEAD(sdcardfs_super_list);
+EXPORT_SYMBOL_GPL(sdcardfs_super_list_lock);
+EXPORT_SYMBOL_GPL(sdcardfs_super_list);
+
+/*
+ * There is no need to lock the sdcardfs_sb_info here as there is no
+ * way anyone can have a reference to the superblock at this point in time.
+ */
+static int sdcardfs_read_super(struct super_block *sb, const char *dev_name,
+                                               void *raw_data, int silent)
+{
+       int err = 0;
+       int debug;
+       struct super_block *lower_sb;
+       struct path lower_path;
+       struct sdcardfs_sb_info *sb_info;
+       struct inode *inode;
+
+       printk(KERN_INFO "sdcardfs version 2.0\n");
+
+       if (!dev_name) {
+               printk(KERN_ERR
+                      "sdcardfs: read_super: missing dev_name argument\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       printk(KERN_INFO "sdcardfs: dev_name -> %s\n", dev_name);
+       printk(KERN_INFO "sdcardfs: options -> %s\n", (char *)raw_data);
+
+       /* parse lower path */
+       err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+                       &lower_path);
+       if (err) {
+               printk(KERN_ERR "sdcardfs: error accessing lower directory '%s'\n", dev_name);
+               goto out;
+       }
+
+       /* allocate superblock private data */
+       sb->s_fs_info = kzalloc(sizeof(struct sdcardfs_sb_info), GFP_KERNEL);
+       if (!SDCARDFS_SB(sb)) {
+               printk(KERN_CRIT "sdcardfs: read_super: out of memory\n");
+               err = -ENOMEM;
+               goto out_free;
+       }
+
+       sb_info = sb->s_fs_info;
+       /* parse options */
+       err = parse_options(sb, raw_data, silent, &debug, &sb_info->options);
+       if (err) {
+               printk(KERN_ERR "sdcardfs: invalid options\n");
+               goto out_freesbi;
+       }
+
+       /* set the lower superblock field of upper superblock */
+       lower_sb = lower_path.dentry->d_sb;
+       atomic_inc(&lower_sb->s_active);
+       sdcardfs_set_lower_super(sb, lower_sb);
+
+       /* inherit maxbytes from lower file system */
+       sb->s_maxbytes = lower_sb->s_maxbytes;
+
+       /*
+        * Our c/m/atime granularity is 1 ns because we may stack on file
+        * systems whose granularity is as good.
+        */
+       sb->s_time_gran = 1;
+
+       sb->s_magic = SDCARDFS_SUPER_MAGIC;
+       sb->s_op = &sdcardfs_sops;
+
+       /* get a new inode and allocate our root dentry */
+       inode = sdcardfs_iget(sb, lower_path.dentry->d_inode, 0);
+       if (IS_ERR(inode)) {
+               err = PTR_ERR(inode);
+               goto out_sput;
+       }
+       sb->s_root = d_make_root(inode);
+       if (!sb->s_root) {
+               err = -ENOMEM;
+               goto out_iput;
+       }
+       d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
+
+       /* link the upper and lower dentries */
+       sb->s_root->d_fsdata = NULL;
+       err = new_dentry_private_data(sb->s_root);
+       if (err)
+               goto out_freeroot;
+
+       /* set the lower dentries for s_root */
+       sdcardfs_set_lower_path(sb->s_root, &lower_path);
+
+       /*
+        * No need to call interpose because we already have a positive
+        * dentry, which was instantiated by d_make_root.  Just need to
+        * d_rehash it.
+        */
+       d_rehash(sb->s_root);
+
+       /* setup permission policy */
+       sb_info->obbpath_s = kzalloc(PATH_MAX, GFP_KERNEL);
+       mutex_lock(&sdcardfs_super_list_lock);
+       if(sb_info->options.multiuser) {
+               setup_derived_state(sb->s_root->d_inode, PERM_PRE_ROOT, sb_info->options.fs_user_id, AID_ROOT, false);
+               snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
+               /*err =  prepare_dir(sb_info->obbpath_s,
+                                       sb_info->options.fs_low_uid,
+                                       sb_info->options.fs_low_gid, 00755);*/
+       } else {
+               setup_derived_state(sb->s_root->d_inode, PERM_ROOT, sb_info->options.fs_low_uid, AID_ROOT, false);
+               snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
+       }
+       fix_derived_permission(sb->s_root->d_inode);
+       sb_info->sb = sb;
+       list_add(&sb_info->list, &sdcardfs_super_list);
+       mutex_unlock(&sdcardfs_super_list_lock);
+
+       if (!silent)
+               printk(KERN_INFO "sdcardfs: mounted on top of %s type %s\n",
+                               dev_name, lower_sb->s_type->name);
+       goto out; /* all is well */
+
+       /* no longer needed: free_dentry_private_data(sb->s_root); */
+out_freeroot:
+       dput(sb->s_root);
+out_iput:
+       iput(inode);
+out_sput:
+       /* drop refs we took earlier */
+       atomic_dec(&lower_sb->s_active);
+out_freesbi:
+       kfree(SDCARDFS_SB(sb));
+       sb->s_fs_info = NULL;
+out_free:
+       path_put(&lower_path);
+
+out:
+       return err;
+}
+
+/* A mount_nodev() work-alike that also passes dev_name and the option string to fill_super */
+static struct dentry *mount_nodev_with_options(struct file_system_type *fs_type,
+        int flags, const char *dev_name, void *data,
+        int (*fill_super)(struct super_block *, const char *, void *, int))
+{
+       int error;
+       struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+
+       if (IS_ERR(s))
+               return ERR_CAST(s);
+
+       s->s_flags = flags;
+
+       error = fill_super(s, dev_name, data, flags & MS_SILENT ? 1 : 0);
+       if (error) {
+               deactivate_locked_super(s);
+               return ERR_PTR(error);
+       }
+       s->s_flags |= MS_ACTIVE;
+       return dget(s->s_root);
+}
+
+struct dentry *sdcardfs_mount(struct file_system_type *fs_type, int flags,
+                           const char *dev_name, void *raw_data)
+{
+       /*
+        * dev_name is the lower path name,
+        * raw_data is the option string.
+        */
+       return mount_nodev_with_options(fs_type, flags, dev_name,
+                                       raw_data, sdcardfs_read_super);
+}
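To show where dev_name and raw_data come from, here is a hedged userspace sketch of issuing such a mount. The lower directory, target path and option values are hypothetical (on Android the equivalent mount is normally performed by vold); only the "sdcardfs" filesystem type and the option names come from this patch.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* the source path becomes dev_name and the option string becomes
	 * raw_data inside sdcardfs_read_super() */
	const char *lower  = "/data/media";                    /* hypothetical */
	const char *target = "/mnt/runtime/default/emulated";  /* hypothetical */
	const char *opts   = "fsuid=1023,fsgid=1023,gid=1015,multiuser";

	if (mount(lower, target, "sdcardfs", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}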
+
+void sdcardfs_kill_sb(struct super_block *sb)
+{
+       struct sdcardfs_sb_info *sbi;
+
+       if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+               sbi = SDCARDFS_SB(sb);
+               mutex_lock(&sdcardfs_super_list_lock);
+               list_del(&sbi->list);
+               mutex_unlock(&sdcardfs_super_list_lock);
+       }
+       generic_shutdown_super(sb);
+}
+
+static struct file_system_type sdcardfs_fs_type = {
+       .owner          = THIS_MODULE,
+       .name           = SDCARDFS_NAME,
+       .mount          = sdcardfs_mount,
+       .kill_sb        = sdcardfs_kill_sb,
+       .fs_flags       = 0,
+};
+
+static int __init init_sdcardfs_fs(void)
+{
+       int err;
+
+       pr_info("Registering sdcardfs " SDCARDFS_VERSION "\n");
+
+       err = sdcardfs_init_inode_cache();
+       if (err)
+               goto out;
+       err = sdcardfs_init_dentry_cache();
+       if (err)
+               goto out;
+       err = packagelist_init();
+       if (err)
+               goto out;
+       err = register_filesystem(&sdcardfs_fs_type);
+out:
+       if (err) {
+               sdcardfs_destroy_inode_cache();
+               sdcardfs_destroy_dentry_cache();
+               packagelist_exit();
+       }
+       return err;
+}
+
+static void __exit exit_sdcardfs_fs(void)
+{
+       sdcardfs_destroy_inode_cache();
+       sdcardfs_destroy_dentry_cache();
+       packagelist_exit();
+       unregister_filesystem(&sdcardfs_fs_type);
+       pr_info("Completed sdcardfs module unload\n");
+}
+
+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
+             " (http://www.fsl.cs.sunysb.edu/)");
+MODULE_DESCRIPTION("Wrapfs " SDCARDFS_VERSION
+                  " (http://wrapfs.filesystems.org/)");
+MODULE_LICENSE("GPL");
+
+module_init(init_sdcardfs_fs);
+module_exit(exit_sdcardfs_fs);
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
new file mode 100644 (file)
index 0000000..e21f646
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * fs/sdcardfs/mmap.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+static int sdcardfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       int err;
+       struct file *file, *lower_file;
+       const struct vm_operations_struct *lower_vm_ops;
+       struct vm_area_struct lower_vma;
+
+       memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
+       file = lower_vma.vm_file;
+       lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
+       BUG_ON(!lower_vm_ops);
+
+       lower_file = sdcardfs_lower_file(file);
+       /*
+        * XXX: vm_ops->fault may be called in parallel.  Because we have to
+        * resort to temporarily changing the vma->vm_file to point to the
+        * lower file, a concurrent invocation of sdcardfs_fault could see a
+        * different value.  In this workaround, we keep a different copy of
+        * the vma structure in our stack, so we never expose a different
+        * value of the vma->vm_file called to us, even temporarily.  A
+        * better fix would be to change the calling semantics of ->fault to
+        * take an explicit file pointer.
+        */
+       lower_vma.vm_file = lower_file;
+       err = lower_vm_ops->fault(&lower_vma, vmf);
+       return err;
+}
+
+static ssize_t sdcardfs_direct_IO(struct kiocb *iocb,
+               struct iov_iter *iter, loff_t pos)
+{
+       /*
+        * This function returns zero on purpose in order to support direct IO.
+        * __dentry_open checks a_ops->direct_IO and returns EINVAL if it is NULL.
+        *
+        * However, this function won't be called by certain file operations
+        * including generic fs functions.  Reads and writes are delivered to
+        * the lower file systems and the direct IOs will be handled by them.
+        *
+        * NOTE: exceptionally, on recent kernels (since Linux 3.8.x),
+        * swap_writepage invokes this function directly.
+        */
+       printk(KERN_INFO "%s, operation is not supported\n", __func__);
+       return 0;
+}
+
+/*
+ * XXX: the default address_space_ops for sdcardfs is empty.  We cannot set
+ * our inode->i_mapping->a_ops to NULL because too many code paths expect
+ * the a_ops vector to be non-NULL.
+ */
+const struct address_space_operations sdcardfs_aops = {
+       /* empty on purpose */
+       .direct_IO      = sdcardfs_direct_IO,
+};
+
+const struct vm_operations_struct sdcardfs_vm_ops = {
+       .fault          = sdcardfs_fault,
+};
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
new file mode 100644 (file)
index 0000000..923ba10
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * fs/sdcardfs/multiuser.h
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#define MULTIUSER_APP_PER_USER_RANGE 100000
+
+typedef uid_t userid_t;
+typedef uid_t appid_t;
+
+static inline userid_t multiuser_get_user_id(uid_t uid) {
+    return uid / MULTIUSER_APP_PER_USER_RANGE;
+}
+
+static inline appid_t multiuser_get_app_id(uid_t uid) {
+    return uid % MULTIUSER_APP_PER_USER_RANGE;
+}
+
+static inline uid_t multiuser_get_uid(userid_t userId, appid_t appId) {
+    return userId * MULTIUSER_APP_PER_USER_RANGE + (appId % MULTIUSER_APP_PER_USER_RANGE);
+}
+
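A worked example of the arithmetic above, with illustrative numbers: Android user 10 combined with app ID 10059 yields uid 10 * 100000 + 10059 = 1010059, and the other two helpers recover the original parts by division and modulo.

#include <assert.h>
#include <stdio.h>

#define MULTIUSER_APP_PER_USER_RANGE 100000

int main(void)
{
	unsigned int userid = 10, appid = 10059;        /* example values only */
	unsigned int uid = userid * MULTIUSER_APP_PER_USER_RANGE
			   + (appid % MULTIUSER_APP_PER_USER_RANGE);

	assert(uid == 1010059);
	assert(uid / MULTIUSER_APP_PER_USER_RANGE == userid);  /* multiuser_get_user_id() */
	assert(uid % MULTIUSER_APP_PER_USER_RANGE == appid);   /* multiuser_get_app_id() */
	printf("uid = %u\n", uid);
	return 0;
}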
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
new file mode 100644 (file)
index 0000000..9c33405
--- /dev/null
@@ -0,0 +1,444 @@
+/*
+ * fs/sdcardfs/packagelist.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include <linux/hashtable.h>
+#include <linux/delay.h>
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/configfs.h>
+
+#define STRING_BUF_SIZE                (512)
+
+struct hashtable_entry {
+       struct hlist_node hlist;
+       void *key;
+       unsigned int value;
+};
+
+struct sb_list {
+       struct super_block *sb;
+       struct list_head list;
+};
+
+struct packagelist_data {
+       DECLARE_HASHTABLE(package_to_appid, 8);
+       struct mutex hashtable_lock;
+};
+
+static struct packagelist_data *pkgl_data_all;
+
+static struct kmem_cache *hashtable_entry_cachep;
+
+static unsigned int str_hash(const char *key)
+{
+       size_t i, len = strlen(key);
+       unsigned int h = len;
+       const char *data = key;
+
+       for (i = 0; i < len; i++) {
+               h = h * 31 + *data;
+               data++;
+       }
+       return h;
+}
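The hash above is the classic 31-multiplier string hash seeded with the string length; it can be reproduced in userspace to see what value a given package name produces. The package name below is only an example; inside the kernel, hash_add()/hash_for_each_possible() further fold this value down to one of the table's 256 buckets.

#include <stdio.h>
#include <string.h>

/* mirrors str_hash(): h starts at strlen(key), then h = h * 31 + byte */
static unsigned int pkg_hash(const char *key)
{
	size_t i, len = strlen(key);
	unsigned int h = len;

	for (i = 0; i < len; i++)
		h = h * 31 + key[i];
	return h;
}

int main(void)
{
	const char *pkg = "com.example.app";    /* example package name */

	printf("str_hash(\"%s\") = %u\n", pkg, pkg_hash(pkg));
	return 0;
}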
+
+appid_t get_appid(void *pkgl_id, const char *app_name)
+{
+       struct packagelist_data *pkgl_dat = pkgl_data_all;
+       struct hashtable_entry *hash_cur;
+       unsigned int hash = str_hash(app_name);
+       appid_t ret_id;
+
+       mutex_lock(&pkgl_dat->hashtable_lock);
+       hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+               if (!strcasecmp(app_name, hash_cur->key)) {
+                       ret_id = (appid_t)hash_cur->value;
+                       mutex_unlock(&pkgl_dat->hashtable_lock);
+                       return ret_id;
+               }
+       }
+       mutex_unlock(&pkgl_dat->hashtable_lock);
+       return 0;
+}
+
+/* Kernel has already enforced everything we returned through
+ * derive_permissions_locked(), so this is used to lock down access
+ * even further, such as enforcing that apps hold sdcard_rw. */
+int check_caller_access_to_name(struct inode *parent_node, const char *name)
+{
+       /* Always block security-sensitive files at root */
+       if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
+               if (!strcasecmp(name, "autorun.inf")
+                       || !strcasecmp(name, ".android_secure")
+                       || !strcasecmp(name, "android_secure")) {
+                       return 0;
+               }
+       }
+
+       /* Root always has access; access for any other UIDs should always
+        * be controlled through packages.list. */
+       if (from_kuid(&init_user_ns, current_fsuid()) == 0) {
+               return 1;
+       }
+
+       /* No extra permissions to enforce */
+       return 1;
+}
+
+/* This function is used when opening a file. The open flags must be
+ * checked before calling check_caller_access_to_name() */
+int open_flags_to_access_mode(int open_flags)
+{
+       if ((open_flags & O_ACCMODE) == O_RDONLY) {
+               return 0; /* R_OK */
+       } else if ((open_flags & O_ACCMODE) == O_WRONLY) {
+               return 1; /* W_OK */
+       } else {
+               /* Probably O_RDWR, but treat as default to be safe */
+               return 1; /* R_OK | W_OK */
+       }
+}
+
+static int insert_str_to_int_lock(struct packagelist_data *pkgl_dat, char *key,
+               unsigned int value)
+{
+       struct hashtable_entry *hash_cur;
+       struct hashtable_entry *new_entry;
+       unsigned int hash = str_hash(key);
+
+       hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+               if (!strcasecmp(key, hash_cur->key)) {
+                       hash_cur->value = value;
+                       return 0;
+               }
+       }
+       new_entry = kmem_cache_alloc(hashtable_entry_cachep, GFP_KERNEL);
+       if (!new_entry)
+               return -ENOMEM;
+       new_entry->key = kstrdup(key, GFP_KERNEL);
+       new_entry->value = value;
+       hash_add(pkgl_dat->package_to_appid, &new_entry->hlist, hash);
+       return 0;
+}
+
+static void fixup_perms(struct super_block *sb) {
+       if (sb && sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+               mutex_lock(&sb->s_root->d_inode->i_mutex);
+               get_derive_permissions_recursive(sb->s_root);
+               mutex_unlock(&sb->s_root->d_inode->i_mutex);
+       }
+}
+
+static int insert_str_to_int(struct packagelist_data *pkgl_dat, char *key,
+               unsigned int value) {
+       int ret;
+       struct sdcardfs_sb_info *sbinfo;
+       mutex_lock(&sdcardfs_super_list_lock);
+       mutex_lock(&pkgl_dat->hashtable_lock);
+       ret = insert_str_to_int_lock(pkgl_dat, key, value);
+       mutex_unlock(&pkgl_dat->hashtable_lock);
+
+       list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+               if (sbinfo) {
+                       fixup_perms(sbinfo->sb);
+               }
+       }
+       mutex_unlock(&sdcardfs_super_list_lock);
+       return ret;
+}
+
+static void remove_str_to_int_lock(struct hashtable_entry *h_entry) {
+       kfree(h_entry->key);
+       hash_del(&h_entry->hlist);
+       kmem_cache_free(hashtable_entry_cachep, h_entry);
+}
+
+static void remove_str_to_int(struct packagelist_data *pkgl_dat, const char *key)
+{
+       struct sdcardfs_sb_info *sbinfo;
+       struct hashtable_entry *hash_cur;
+       unsigned int hash = str_hash(key);
+       mutex_lock(&sdcardfs_super_list_lock);
+       mutex_lock(&pkgl_dat->hashtable_lock);
+       hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+               if (!strcasecmp(key, hash_cur->key)) {
+                       remove_str_to_int_lock(hash_cur);
+                       break;
+               }
+       }
+       mutex_unlock(&pkgl_dat->hashtable_lock);
+       list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+               if (sbinfo) {
+                       fixup_perms(sbinfo->sb);
+               }
+       }
+       mutex_unlock(&sdcardfs_super_list_lock);
+       return;
+}
+
+static void remove_all_hashentrys(struct packagelist_data *pkgl_dat)
+{
+       struct hashtable_entry *hash_cur;
+       struct hlist_node *h_t;
+       int i;
+       mutex_lock(&pkgl_dat->hashtable_lock);
+       hash_for_each_safe(pkgl_dat->package_to_appid, i, h_t, hash_cur, hlist)
+               remove_str_to_int_lock(hash_cur);
+       mutex_unlock(&pkgl_dat->hashtable_lock);
+       hash_init(pkgl_dat->package_to_appid);
+}
+
+static struct packagelist_data * packagelist_create(void)
+{
+       struct packagelist_data *pkgl_dat;
+
+       pkgl_dat = kmalloc(sizeof(*pkgl_dat), GFP_KERNEL | __GFP_ZERO);
+       if (!pkgl_dat) {
+                printk(KERN_ERR "sdcardfs: Failed to create hash\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mutex_init(&pkgl_dat->hashtable_lock);
+       hash_init(pkgl_dat->package_to_appid);
+
+       return pkgl_dat;
+}
+
+static void packagelist_destroy(struct packagelist_data *pkgl_dat)
+{
+       remove_all_hashentrys(pkgl_dat);
+       printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
+       kfree(pkgl_dat);
+}
+
+struct package_appid {
+       struct config_item item;
+       int add_pid;
+};
+
+static inline struct package_appid *to_package_appid(struct config_item *item)
+{
+       return item ? container_of(item, struct package_appid, item) : NULL;
+}
+
+static ssize_t package_appid_attr_show(struct config_item *item,
+                                     char *page)
+{
+       ssize_t count;
+       count = sprintf(page, "%d\n", get_appid(pkgl_data_all, item->ci_name));
+       return count;
+}
+
+static ssize_t package_appid_attr_store(struct config_item *item,
+                                      const char *page, size_t count)
+{
+       struct package_appid *package_appid = to_package_appid(item);
+       unsigned long tmp;
+       char *p = (char *) page;
+       int ret;
+
+       tmp = simple_strtoul(p, &p, 10);
+       if (!p || (*p && (*p != '\n')))
+               return -EINVAL;
+
+       if (tmp > INT_MAX)
+               return -ERANGE;
+       ret = insert_str_to_int(pkgl_data_all, item->ci_name, (unsigned int)tmp);
+       package_appid->add_pid = tmp;
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static struct configfs_attribute package_appid_attr_add_pid = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "appid",
+       .ca_mode = S_IRUGO | S_IWUGO,
+       .show = package_appid_attr_show,
+       .store = package_appid_attr_store,
+};
+
+static struct configfs_attribute *package_appid_attrs[] = {
+       &package_appid_attr_add_pid,
+       NULL,
+};
+
+static void package_appid_release(struct config_item *item)
+{
+       printk(KERN_INFO "sdcardfs: removing %s\n", item->ci_dentry->d_name.name);
+       /* item->ci_name is freed already, so we rely on the dentry */
+       remove_str_to_int(pkgl_data_all, item->ci_dentry->d_name.name);
+       kfree(to_package_appid(item));
+}
+
+static struct configfs_item_operations package_appid_item_ops = {
+       .release                = package_appid_release,
+};
+
+static struct config_item_type package_appid_type = {
+       .ct_item_ops    = &package_appid_item_ops,
+       .ct_attrs       = package_appid_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+
+struct sdcardfs_packages {
+       struct config_group group;
+};
+
+static inline struct sdcardfs_packages *to_sdcardfs_packages(struct config_item *item)
+{
+       return item ? container_of(to_config_group(item), struct sdcardfs_packages, group) : NULL;
+}
+
+static struct config_item *sdcardfs_packages_make_item(struct config_group *group, const char *name)
+{
+       struct package_appid *package_appid;
+
+       package_appid = kzalloc(sizeof(struct package_appid), GFP_KERNEL);
+       if (!package_appid)
+               return ERR_PTR(-ENOMEM);
+
+       config_item_init_type_name(&package_appid->item, name,
+                                  &package_appid_type);
+
+       package_appid->add_pid = 0;
+
+       return &package_appid->item;
+}
+
+static ssize_t packages_attr_show(struct config_item *item,
+                                        char *page)
+{
+       struct hashtable_entry *hash_cur;
+       struct hlist_node *h_t;
+       int i;
+       int count = 0, written = 0;
+       char errormsg[] = "<truncated>\n";
+
+       mutex_lock(&pkgl_data_all->hashtable_lock);
+       hash_for_each_safe(pkgl_data_all->package_to_appid, i, h_t, hash_cur, hlist) {
+               written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n", (char *)hash_cur->key, hash_cur->value);
+               if (count + written == PAGE_SIZE - sizeof(errormsg)) {
+                       count += scnprintf(page + count, PAGE_SIZE - count, errormsg);
+                       break;
+               }
+               count += written;
+       }
+       mutex_unlock(&pkgl_data_all->hashtable_lock);
+
+       return count;
+}
+
+static struct configfs_attribute sdcardfs_packages_attr_description = {
+       .ca_owner = THIS_MODULE,
+       .ca_name = "packages_gid.list",
+       .ca_mode = S_IRUGO,
+       .show = packages_attr_show,
+};
+
+static struct configfs_attribute *sdcardfs_packages_attrs[] = {
+       &sdcardfs_packages_attr_description,
+       NULL,
+};
+
+static void sdcardfs_packages_release(struct config_item *item)
+{
+       printk(KERN_INFO "sdcardfs: releasing packages configfs item\n");
+       kfree(to_sdcardfs_packages(item));
+}
+
+static struct configfs_item_operations sdcardfs_packages_item_ops = {
+       .release        = sdcardfs_packages_release,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations sdcardfs_packages_group_ops = {
+       .make_item      = sdcardfs_packages_make_item,
+};
+
+static struct config_item_type sdcardfs_packages_type = {
+       .ct_item_ops    = &sdcardfs_packages_item_ops,
+       .ct_group_ops   = &sdcardfs_packages_group_ops,
+       .ct_attrs       = sdcardfs_packages_attrs,
+       .ct_owner       = THIS_MODULE,
+};
+
+static struct configfs_subsystem sdcardfs_packages_subsys = {
+       .su_group = {
+               .cg_item = {
+                       .ci_namebuf = "sdcardfs",
+                       .ci_type = &sdcardfs_packages_type,
+               },
+       },
+};
+
+static int configfs_sdcardfs_init(void)
+{
+       int ret;
+       struct configfs_subsystem *subsys = &sdcardfs_packages_subsys;
+
+       config_group_init(&subsys->su_group);
+       mutex_init(&subsys->su_mutex);
+       ret = configfs_register_subsystem(subsys);
+       if (ret) {
+               printk(KERN_ERR "Error %d while registering subsystem %s\n",
+                      ret,
+                      subsys->su_group.cg_item.ci_namebuf);
+       }
+       return ret;
+}
+
+static void configfs_sdcardfs_exit(void)
+{
+       configfs_unregister_subsystem(&sdcardfs_packages_subsys);
+}
+
+int packagelist_init(void)
+{
+       hashtable_entry_cachep =
+               kmem_cache_create("packagelist_hashtable_entry",
+                                       sizeof(struct hashtable_entry), 0, 0, NULL);
+       if (!hashtable_entry_cachep) {
+               printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
+               return -ENOMEM;
+       }
+
+       pkgl_data_all = packagelist_create();
+       configfs_sdcardfs_init();
+       return 0;
+}
+
+void packagelist_exit(void)
+{
+       configfs_sdcardfs_exit();
+       packagelist_destroy(pkgl_data_all);
+       if (hashtable_entry_cachep)
+               kmem_cache_destroy(hashtable_entry_cachep);
+}
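The configfs subsystem registered above is how userspace feeds the package-to-appid map: creating a directory named after a package calls sdcardfs_packages_make_item(), and writing to its "appid" attribute ends up in package_appid_attr_store(). A hedged sketch follows; the configfs mount point /sys/kernel/config and the package name/appid are assumptions.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int main(void)
{
	/* assumes configfs is mounted at /sys/kernel/config */
	const char *dir = "/sys/kernel/config/sdcardfs/com.example.app";
	char attr[256];
	FILE *f;

	if (mkdir(dir, 0775) != 0) {            /* creates the package item */
		perror("mkdir");
		return 1;
	}
	snprintf(attr, sizeof(attr), "%s/appid", dir);
	f = fopen(attr, "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "10059\n");                  /* stored via insert_str_to_int() */
	fclose(f);
	return 0;
}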
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
new file mode 100644 (file)
index 0000000..f111f89
--- /dev/null
@@ -0,0 +1,530 @@
+/*
+ * fs/sdcardfs/sdcardfs.h
+ *
+ * The sdcardfs v2.0
+ *   This file system replaces the sdcard daemon on Android
+ *   On version 2.0, some of the daemon functions have been ported
+ *   to support the multi-user concepts of Android 4.4
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _SDCARDFS_H_
+#define _SDCARDFS_H_
+
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/fs_stack.h>
+#include <linux/magic.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/security.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include "multiuser.h"
+
+/* the file system name */
+#define SDCARDFS_NAME "sdcardfs"
+
+/* sdcardfs root inode number */
+#define SDCARDFS_ROOT_INO     1
+
+/* useful for tracking code reachability */
+#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
+
+#define SDCARDFS_DIRENT_SIZE 256
+
+/* temporary static uid settings for development */
+#define AID_ROOT             0 /* uid for accessing /mnt/sdcard & extSdcard */
+#define AID_MEDIA_RW      1023 /* internal media storage write access */
+
+#define AID_SDCARD_RW     1015 /* external storage write access */
+#define AID_SDCARD_R      1028 /* external storage read access */
+#define AID_SDCARD_PICS   1033 /* external storage photos access */
+#define AID_SDCARD_AV     1034 /* external storage audio/video access */
+#define AID_SDCARD_ALL    1035 /* access all users external storage */
+
+#define AID_PACKAGE_INFO  1027
+
+#define fix_derived_permission(x)      \
+       do {                                            \
+               (x)->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(x)->d_uid);    \
+               (x)->i_gid = make_kgid(&init_user_ns, get_gid(SDCARDFS_I(x)));  \
+               (x)->i_mode = ((x)->i_mode & S_IFMT) | get_mode(SDCARDFS_I(x));\
+       } while (0)
+
+
+/* OVERRIDE_CRED() and REVERT_CRED()
+ *     OVERRIDE_CRED()
+ *             backs up the original task->cred
+ *             and modifies task->cred->fsuid/fsgid to the specified values.
+ *     REVERT_CRED()
+ *             restores the original task->cred->fsuid/fsgid.
+ * These two macros should be used as a pair, and OVERRIDE_CRED() should be
+ * placed at the beginning of a function, right after the variable declarations.
+ */
+#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred)                \
+       saved_cred = override_fsids(sdcardfs_sbi);      \
+       if (!saved_cred) { return -ENOMEM; }
+
+#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred)    \
+       saved_cred = override_fsids(sdcardfs_sbi);      \
+       if (!saved_cred) { return ERR_PTR(-ENOMEM); }
+
+#define REVERT_CRED(saved_cred)        revert_fsids(saved_cred)
+
+#define DEBUG_CRED()           \
+       printk("KAKJAGI: %s:%d fsuid %d fsgid %d\n",    \
+               __FUNCTION__, __LINE__,                 \
+               (int)current->cred->fsuid,              \
+               (int)current->cred->fsgid);
+
+/* Android 5.0 support */
+
+/* Permission mode for a specific node. Controls how file permissions
+ * are derived for child nodes. */
+typedef enum {
+    /* Nothing special; this node should just inherit from its parent. */
+    PERM_INHERIT,
+    /* This node is one level above a normal root; used for legacy layouts
+     * which use the first level to represent user_id. */
+    PERM_PRE_ROOT,
+    /* This node is "/" */
+    PERM_ROOT,
+    /* This node is "/Android" */
+    PERM_ANDROID,
+    /* This node is "/Android/data" */
+    PERM_ANDROID_DATA,
+    /* This node is "/Android/obb" */
+    PERM_ANDROID_OBB,
+    /* This node is "/Android/media" */
+    PERM_ANDROID_MEDIA,
+} perm_t;
+
+struct sdcardfs_sb_info;
+struct sdcardfs_mount_options;
+
+/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
+const struct cred * override_fsids(struct sdcardfs_sb_info* sbi);
+/* Do not directly use this function, use REVERT_CRED() instead. */
+void revert_fsids(const struct cred * old_cred);
+
+/* operations vectors defined in specific files */
+extern const struct file_operations sdcardfs_main_fops;
+extern const struct file_operations sdcardfs_dir_fops;
+extern const struct inode_operations sdcardfs_main_iops;
+extern const struct inode_operations sdcardfs_dir_iops;
+extern const struct inode_operations sdcardfs_symlink_iops;
+extern const struct super_operations sdcardfs_sops;
+extern const struct dentry_operations sdcardfs_ci_dops;
+extern const struct address_space_operations sdcardfs_aops, sdcardfs_dummy_aops;
+extern const struct vm_operations_struct sdcardfs_vm_ops;
+
+extern int sdcardfs_init_inode_cache(void);
+extern void sdcardfs_destroy_inode_cache(void);
+extern int sdcardfs_init_dentry_cache(void);
+extern void sdcardfs_destroy_dentry_cache(void);
+extern int new_dentry_private_data(struct dentry *dentry);
+extern void free_dentry_private_data(struct dentry *dentry);
+extern struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
+                               unsigned int flags);
+extern struct inode *sdcardfs_iget(struct super_block *sb,
+                                struct inode *lower_inode, userid_t id);
+extern int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+                           struct path *lower_path, userid_t id);
+
+/* file private data */
+struct sdcardfs_file_info {
+       struct file *lower_file;
+       const struct vm_operations_struct *lower_vm_ops;
+};
+
+/* sdcardfs inode data in memory */
+struct sdcardfs_inode_info {
+       struct inode *lower_inode;
+       /* state derived based on current position in hierarchy */
+       perm_t perm;
+       userid_t userid;
+       uid_t d_uid;
+       bool under_android;
+
+       struct inode vfs_inode;
+};
+
+
+/* sdcardfs dentry data in memory */
+struct sdcardfs_dentry_info {
+       spinlock_t lock;        /* protects lower_path */
+       struct path lower_path;
+       struct path orig_path;
+};
+
+struct sdcardfs_mount_options {
+       uid_t fs_low_uid;
+       gid_t fs_low_gid;
+       userid_t fs_user_id;
+       gid_t gid;
+       mode_t mask;
+       bool multiuser;
+       unsigned int reserved_mb;
+};
+
+/* sdcardfs super-block data in memory */
+struct sdcardfs_sb_info {
+       struct super_block *sb;
+       struct super_block *lower_sb;
+       /* derived perm policy : some of the options have been added
+        * to sdcardfs_mount_options (Android 4.4 support) */
+       struct sdcardfs_mount_options options;
+       spinlock_t lock;        /* protects obbpath */
+       char *obbpath_s;
+       struct path obbpath;
+       void *pkgl_id;
+       struct list_head list;
+};
+
+/*
+ * inode to private data
+ *
+ * Since we use containers and the struct inode is _inside_ the
+ * sdcardfs_inode_info structure, SDCARDFS_I will always (given a non-NULL
+ * inode pointer) return a valid non-NULL pointer.
+ */
+static inline struct sdcardfs_inode_info *SDCARDFS_I(const struct inode *inode)
+{
+       return container_of(inode, struct sdcardfs_inode_info, vfs_inode);
+}
+
+/* dentry to private data */
+#define SDCARDFS_D(dent) ((struct sdcardfs_dentry_info *)(dent)->d_fsdata)
+
+/* superblock to private data */
+#define SDCARDFS_SB(super) ((struct sdcardfs_sb_info *)(super)->s_fs_info)
+
+/* file to private data */
+#define SDCARDFS_F(file) ((struct sdcardfs_file_info *)((file)->private_data))
+
+/* file to lower file */
+static inline struct file *sdcardfs_lower_file(const struct file *f)
+{
+       return SDCARDFS_F(f)->lower_file;
+}
+
+static inline void sdcardfs_set_lower_file(struct file *f, struct file *val)
+{
+       SDCARDFS_F(f)->lower_file = val;
+}
+
+/* inode to lower inode. */
+static inline struct inode *sdcardfs_lower_inode(const struct inode *i)
+{
+       return SDCARDFS_I(i)->lower_inode;
+}
+
+static inline void sdcardfs_set_lower_inode(struct inode *i, struct inode *val)
+{
+       SDCARDFS_I(i)->lower_inode = val;
+}
+
+/* superblock to lower superblock */
+static inline struct super_block *sdcardfs_lower_super(
+       const struct super_block *sb)
+{
+       return SDCARDFS_SB(sb)->lower_sb;
+}
+
+static inline void sdcardfs_set_lower_super(struct super_block *sb,
+                                         struct super_block *val)
+{
+       SDCARDFS_SB(sb)->lower_sb = val;
+}
+
+/* path based (dentry/mnt) macros */
+static inline void pathcpy(struct path *dst, const struct path *src)
+{
+       dst->dentry = src->dentry;
+       dst->mnt = src->mnt;
+}
+
+/* The sdcardfs_get_##pname functions call path_get();
+ * therefore, the caller must call the matching path_put functions.
+ */
+#define SDCARDFS_DENT_FUNC(pname) \
+static inline void sdcardfs_get_##pname(const struct dentry *dent, \
+                                       struct path *pname) \
+{ \
+       spin_lock(&SDCARDFS_D(dent)->lock); \
+       pathcpy(pname, &SDCARDFS_D(dent)->pname); \
+       path_get(pname); \
+       spin_unlock(&SDCARDFS_D(dent)->lock); \
+       return; \
+} \
+static inline void sdcardfs_put_##pname(const struct dentry *dent, \
+                                       struct path *pname) \
+{ \
+       path_put(pname); \
+       return; \
+} \
+static inline void sdcardfs_set_##pname(const struct dentry *dent, \
+                                       struct path *pname) \
+{ \
+       spin_lock(&SDCARDFS_D(dent)->lock); \
+       pathcpy(&SDCARDFS_D(dent)->pname, pname); \
+       spin_unlock(&SDCARDFS_D(dent)->lock); \
+       return; \
+} \
+static inline void sdcardfs_reset_##pname(const struct dentry *dent) \
+{ \
+       spin_lock(&SDCARDFS_D(dent)->lock); \
+       SDCARDFS_D(dent)->pname.dentry = NULL; \
+       SDCARDFS_D(dent)->pname.mnt = NULL; \
+       spin_unlock(&SDCARDFS_D(dent)->lock); \
+       return; \
+} \
+static inline void sdcardfs_put_reset_##pname(const struct dentry *dent) \
+{ \
+       struct path pname; \
+       spin_lock(&SDCARDFS_D(dent)->lock); \
+       if(SDCARDFS_D(dent)->pname.dentry) { \
+               pathcpy(&pname, &SDCARDFS_D(dent)->pname); \
+               SDCARDFS_D(dent)->pname.dentry = NULL; \
+               SDCARDFS_D(dent)->pname.mnt = NULL; \
+               spin_unlock(&SDCARDFS_D(dent)->lock); \
+               path_put(&pname); \
+       } else \
+               spin_unlock(&SDCARDFS_D(dent)->lock); \
+       return; \
+}
+
+SDCARDFS_DENT_FUNC(lower_path)
+SDCARDFS_DENT_FUNC(orig_path)
+
+static inline int get_gid(struct sdcardfs_inode_info *info) {
+       struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
+       if (sb_info->options.gid == AID_SDCARD_RW) {
+               /* As an optimization, certain trusted system components only run
+                * as owner but operate across all users. Since we're now handing
+                * out the sdcard_rw GID only to trusted apps, we're okay relaxing
+                * the user boundary enforcement for the default view. The UIDs
+                * assigned to app directories are still multiuser aware. */
+               return AID_SDCARD_RW;
+       } else {
+               return multiuser_get_uid(info->userid, sb_info->options.gid);
+       }
+}
+static inline int get_mode(struct sdcardfs_inode_info *info) {
+       int owner_mode;
+       int filtered_mode;
+       struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
+       int visible_mode = 0775 & ~sb_info->options.mask;
+
+       if (info->perm == PERM_PRE_ROOT) {
+               /* Top of multi-user view should always be visible to ensure
+               * secondary users can traverse inside. */
+               visible_mode = 0711;
+       } else if (info->under_android) {
+               /* Block "other" access to Android directories, since only apps
+               * belonging to a specific user should be in there; we still
+               * leave +x open for the default view. */
+               if (sb_info->options.gid == AID_SDCARD_RW) {
+                       visible_mode = visible_mode & ~0006;
+               } else {
+                       visible_mode = visible_mode & ~0007;
+               }
+       }
+       owner_mode = info->lower_inode->i_mode & 0700;
+       filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
+       return filtered_mode;
+}
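A worked example of the derivation above, with assumed inputs: mount option mask=6, a node outside /Android that is not the pre-root, and a lower inode mode of 0700. Then visible_mode = 0775 & ~06 = 0771, owner_mode = 0700, and the returned mode is 0771 & (0700 | 0070 | 0007) = 0771.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int mask = 06;                 /* assumed mount option mask=6 */
	unsigned int lower_i_mode = 0700;       /* assumed lower inode permissions */
	unsigned int visible = 0775 & ~mask;    /* plain node: not pre-root, not under /Android */
	unsigned int owner = lower_i_mode & 0700;
	unsigned int filtered = visible & (owner | (owner >> 3) | (owner >> 6));

	assert(filtered == 0771);
	printf("derived mode = %o\n", filtered);
	return 0;
}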
+
+static inline int has_graft_path(const struct dentry *dent)
+{
+       int ret = 0;
+
+       spin_lock(&SDCARDFS_D(dent)->lock);
+       if (SDCARDFS_D(dent)->orig_path.dentry != NULL)
+               ret = 1;
+       spin_unlock(&SDCARDFS_D(dent)->lock);
+
+       return ret;
+}
+
+static inline void sdcardfs_get_real_lower(const struct dentry *dent,
+                                               struct path *real_lower)
+{
+       /* in case of a local obb dentry
+        * the orig_path should be returned
+        */
+       if(has_graft_path(dent))
+               sdcardfs_get_orig_path(dent, real_lower);
+       else
+               sdcardfs_get_lower_path(dent, real_lower);
+}
+
+static inline void sdcardfs_put_real_lower(const struct dentry *dent,
+                                               struct path *real_lower)
+{
+       if(has_graft_path(dent))
+               sdcardfs_put_orig_path(dent, real_lower);
+       else
+               sdcardfs_put_lower_path(dent, real_lower);
+}
+
+extern struct mutex sdcardfs_super_list_lock;
+extern struct list_head sdcardfs_super_list;
+
+/* for packagelist.c */
+extern appid_t get_appid(void *pkgl_id, const char *app_name);
+extern int check_caller_access_to_name(struct inode *parent_node, const char* name);
+extern int open_flags_to_access_mode(int open_flags);
+extern int packagelist_init(void);
+extern void packagelist_exit(void);
+
+/* for derived_perm.c */
+extern void setup_derived_state(struct inode *inode, perm_t perm,
+                       userid_t userid, uid_t uid, bool under_android);
+extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
+extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry);
+extern void get_derive_permissions_recursive(struct dentry *parent);
+
+extern void update_derived_permission_lock(struct dentry *dentry);
+extern int need_graft_path(struct dentry *dentry);
+extern int is_base_obbpath(struct dentry *dentry);
+extern int is_obbpath_invalid(struct dentry *dentry);
+extern int setup_obb_dentry(struct dentry *dentry, struct path *lower_path);
+
+/* locking helpers */
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+       struct dentry *dir = dget_parent(dentry);
+       mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
+       return dir;
+}
+
+static inline void unlock_dir(struct dentry *dir)
+{
+       mutex_unlock(&d_inode(dir)->i_mutex);
+       dput(dir);
+}
+
+static inline int prepare_dir(const char *path_s, uid_t uid, gid_t gid, mode_t mode)
+{
+       int err;
+       struct dentry *dent;
+       struct iattr attrs;
+       struct path parent;
+
+       dent = kern_path_locked(path_s, &parent);
+       if (IS_ERR(dent)) {
+               err = PTR_ERR(dent);
+               if (err == -EEXIST)
+                       err = 0;
+               goto out_unlock;
+       }
+
+       err = vfs_mkdir(d_inode(parent.dentry), dent, mode);
+       if (err) {
+               if (err == -EEXIST)
+                       err = 0;
+               goto out_dput;
+       }
+
+       attrs.ia_uid = make_kuid(&init_user_ns, uid);
+       attrs.ia_gid = make_kgid(&init_user_ns, gid);
+       attrs.ia_valid = ATTR_UID | ATTR_GID;
+       mutex_lock(&d_inode(dent)->i_mutex);
+       notify_change(dent, &attrs, NULL);
+       mutex_unlock(&d_inode(dent)->i_mutex);
+
+out_dput:
+       dput(dent);
+
+out_unlock:
+       /* parent dentry locked by kern_path_locked */
+       mutex_unlock(&d_inode(parent.dentry)->i_mutex);
+       path_put(&parent);
+       return err;
+}
+
+/*
+ * Return 1 if the disk has enough free space, otherwise 0.
+ * We assume that no existing files are overwritten.
+ */
+static inline int check_min_free_space(struct dentry *dentry, size_t size, int dir)
+{
+       int err;
+       struct path lower_path;
+       struct kstatfs statfs;
+       u64 avail;
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+       if (sbi->options.reserved_mb) {
+               /* Get fs stat of lower filesystem. */
+               sdcardfs_get_lower_path(dentry, &lower_path);
+               err = vfs_statfs(&lower_path, &statfs);
+               sdcardfs_put_lower_path(dentry, &lower_path);
+
+               if (unlikely(err))
+                       return 0;
+
+               /* Invalid statfs information. */
+               if (unlikely(statfs.f_bsize == 0))
+                       return 0;
+
+               /* if you are checking a directory, set size to f_bsize. */
+               if (unlikely(dir))
+                       size = statfs.f_bsize;
+
+               /* available size */
+               avail = statfs.f_bavail * statfs.f_bsize;
+
+               /* not enough space */
+               if ((u64)size > avail)
+                       return 0;
+
+               /* enough space */
+               if ((avail - size) > (sbi->options.reserved_mb * 1024 * 1024))
+                       return 1;
+
+               return 0;
+       } else
+               return 1;
+}
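To make the reserved-space arithmetic above concrete, assume reserved_mb=100 and a lower filesystem reporting a 4096-byte block size with 30000 available blocks (about 117 MiB). A 10 MiB write leaves more than the 100 MiB reserve and is allowed; a 20 MiB write would dip into the reserve and is refused. All numbers here are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the arithmetic in check_min_free_space() with invented statfs values */
static int enough_space(uint64_t f_bavail, uint64_t f_bsize,
			uint64_t size, unsigned int reserved_mb)
{
	uint64_t avail = f_bavail * f_bsize;

	if (size > avail)
		return 0;
	return (avail - size) > (uint64_t)reserved_mb * 1024 * 1024;
}

int main(void)
{
	assert(enough_space(30000, 4096, 10 * 1024 * 1024, 100) == 1);
	assert(enough_space(30000, 4096, 20 * 1024 * 1024, 100) == 0);
	printf("reserved-space check behaves as described\n");
	return 0;
}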
+
+/* Copies attrs and maintains sdcardfs managed attrs */
+static inline void sdcardfs_copy_and_fix_attrs(struct inode *dest, const struct inode *src)
+{
+       dest->i_mode = (src->i_mode  & S_IFMT) | get_mode(SDCARDFS_I(dest));
+       dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->d_uid);
+       dest->i_gid = make_kgid(&init_user_ns, get_gid(SDCARDFS_I(dest)));
+       dest->i_rdev = src->i_rdev;
+       dest->i_atime = src->i_atime;
+       dest->i_mtime = src->i_mtime;
+       dest->i_ctime = src->i_ctime;
+       dest->i_blkbits = src->i_blkbits;
+       dest->i_flags = src->i_flags;
+       set_nlink(dest, src->i_nlink);
+}
+#endif /* not _SDCARDFS_H_ */
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
new file mode 100644 (file)
index 0000000..1d64901
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * fs/sdcardfs/super.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/*
+ * The inode cache is used with alloc_inode for both our inode info and the
+ * vfs inode.
+ */
+static struct kmem_cache *sdcardfs_inode_cachep;
+
+/* final actions when unmounting a file system */
+static void sdcardfs_put_super(struct super_block *sb)
+{
+       struct sdcardfs_sb_info *spd;
+       struct super_block *s;
+
+       spd = SDCARDFS_SB(sb);
+       if (!spd)
+               return;
+
+       if(spd->obbpath_s) {
+               kfree(spd->obbpath_s);
+               path_put(&spd->obbpath);
+       }
+
+       /* decrement lower super references */
+       s = sdcardfs_lower_super(sb);
+       sdcardfs_set_lower_super(sb, NULL);
+       atomic_dec(&s->s_active);
+
+       kfree(spd);
+       sb->s_fs_info = NULL;
+}
+
+static int sdcardfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+       int err;
+       struct path lower_path;
+       u32 min_blocks;
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+       sdcardfs_get_lower_path(dentry, &lower_path);
+       err = vfs_statfs(&lower_path, buf);
+       sdcardfs_put_lower_path(dentry, &lower_path);
+
+       if (sbi->options.reserved_mb) {
+               /* Invalid statfs information. */
+               if (buf->f_bsize == 0) {
+                       printk(KERN_ERR "Returned block size is zero.\n");
+                       return -EINVAL;
+               }
+
+               min_blocks = ((sbi->options.reserved_mb * 1024 * 1024)/buf->f_bsize);
+               buf->f_blocks -= min_blocks;
+
+               if (buf->f_bavail > min_blocks)
+                       buf->f_bavail -= min_blocks;
+               else
+                       buf->f_bavail = 0;
+
+               /* Make reserved blocks invisible to media storage */
+               buf->f_bfree = buf->f_bavail;
+       }
+
+       /* set return buf to our f/s to avoid confusing user-level utils */
+       buf->f_type = SDCARDFS_SUPER_MAGIC;
+
+       return err;
+}
+
+/*
+ * @flags: numeric mount options
+ * @options: mount options string
+ */
+static int sdcardfs_remount_fs(struct super_block *sb, int *flags, char *options)
+{
+       int err = 0;
+
+       /*
+        * The VFS will take care of "ro" and "rw" flags among others.  We
+        * can safely accept a few flags (RDONLY, MANDLOCK), and honor
+        * SILENT, but anything else left over is an error.
+        */
+       if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT)) != 0) {
+               printk(KERN_ERR
+                      "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+/*
+ * Called by iput() when the inode reference count reaches zero
+ * and the inode is not hashed anywhere.  Used to clear anything
+ * that needs to be cleared before the inode is completely destroyed
+ * and put on the inode free list.
+ */
+static void sdcardfs_evict_inode(struct inode *inode)
+{
+       struct inode *lower_inode;
+
+       truncate_inode_pages(&inode->i_data, 0);
+       clear_inode(inode);
+       /*
+        * Decrement a reference to a lower_inode, which was incremented
+        * by our read_inode when it was created initially.
+        */
+       lower_inode = sdcardfs_lower_inode(inode);
+       sdcardfs_set_lower_inode(inode, NULL);
+       iput(lower_inode);
+}
+
+static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
+{
+       struct sdcardfs_inode_info *i;
+
+       i = kmem_cache_alloc(sdcardfs_inode_cachep, GFP_KERNEL);
+       if (!i)
+               return NULL;
+
+       /* memset everything up to the inode to 0 */
+       memset(i, 0, offsetof(struct sdcardfs_inode_info, vfs_inode));
+
+       i->vfs_inode.i_version = 1;
+       return &i->vfs_inode;
+}
+
+static void sdcardfs_destroy_inode(struct inode *inode)
+{
+       kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+}
+
+/* sdcardfs inode cache constructor */
+static void init_once(void *obj)
+{
+       struct sdcardfs_inode_info *i = obj;
+
+       inode_init_once(&i->vfs_inode);
+}
+
+int sdcardfs_init_inode_cache(void)
+{
+       int err = 0;
+
+       sdcardfs_inode_cachep =
+               kmem_cache_create("sdcardfs_inode_cache",
+                                 sizeof(struct sdcardfs_inode_info), 0,
+                                 SLAB_RECLAIM_ACCOUNT, init_once);
+       if (!sdcardfs_inode_cachep)
+               err = -ENOMEM;
+       return err;
+}
+
+/* sdcardfs inode cache destructor */
+void sdcardfs_destroy_inode_cache(void)
+{
+       if (sdcardfs_inode_cachep)
+               kmem_cache_destroy(sdcardfs_inode_cachep);
+}
+
+/*
+ * Used only in nfs, to kill any pending RPC tasks, so that subsequent
+ * code can actually succeed and won't leave tasks that need handling.
+ */
+static void sdcardfs_umount_begin(struct super_block *sb)
+{
+       struct super_block *lower_sb;
+
+       lower_sb = sdcardfs_lower_super(sb);
+       if (lower_sb && lower_sb->s_op && lower_sb->s_op->umount_begin)
+               lower_sb->s_op->umount_begin(lower_sb);
+}
+
+static int sdcardfs_show_options(struct seq_file *m, struct dentry *root)
+{
+       struct sdcardfs_sb_info *sbi = SDCARDFS_SB(root->d_sb);
+       struct sdcardfs_mount_options *opts = &sbi->options;
+
+       if (opts->fs_low_uid != 0)
+               seq_printf(m, ",uid=%u", opts->fs_low_uid);
+       if (opts->fs_low_gid != 0)
+               seq_printf(m, ",gid=%u", opts->fs_low_gid);
+
+       if (opts->multiuser)
+               seq_printf(m, ",multiuser");
+
+       if (opts->reserved_mb != 0)
+               seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
+
+       return 0;
+};
+
+const struct super_operations sdcardfs_sops = {
+       .put_super      = sdcardfs_put_super,
+       .statfs         = sdcardfs_statfs,
+       .remount_fs     = sdcardfs_remount_fs,
+       .evict_inode    = sdcardfs_evict_inode,
+       .umount_begin   = sdcardfs_umount_begin,
+       .show_options   = sdcardfs_show_options,
+       .alloc_inode    = sdcardfs_alloc_inode,
+       .destroy_inode  = sdcardfs_destroy_inode,
+       .drop_inode     = generic_delete_inode,
+};
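
The reserved_mb branch of sdcardfs_statfs() above converts the reservation to a
block count and subtracts it from what userspace sees; f_bfree is then clamped
to f_bavail so the reserved blocks stay invisible. A small userspace sketch of
that adjustment with hypothetical numbers (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the reserved_mb branch of sdcardfs_statfs(). */
static void hide_reserved(uint64_t *f_blocks, uint64_t *f_bavail,
			  uint64_t *f_bfree, uint64_t f_bsize,
			  uint64_t reserved_mb)
{
	uint64_t min_blocks = (reserved_mb * 1024 * 1024) / f_bsize;

	*f_blocks -= min_blocks;
	*f_bavail = (*f_bavail > min_blocks) ? *f_bavail - min_blocks : 0;
	*f_bfree = *f_bavail;
}

int main(void)
{
	uint64_t blocks = 1000000, bavail = 2000, bfree = 2500;

	hide_reserved(&blocks, &bavail, &bfree, 4096, 16); /* 16 MiB = 4096 blocks */
	printf("%llu %llu %llu\n", (unsigned long long)blocks,
	       (unsigned long long)bavail,
	       (unsigned long long)bfree);                 /* 995904 0 0 */
	return 0;
}
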
index 015547330e88699dccb37fe1d4509e1dc07394f7..09e71a00a9b8dc49a3d3bdc32eb7b9134916fdcd 100644 (file)
@@ -70,9 +70,9 @@ static long __estimate_accuracy(struct timespec *tv)
        return slack;
 }
 
-long select_estimate_accuracy(struct timespec *tv)
+u64 select_estimate_accuracy(struct timespec *tv)
 {
-       unsigned long ret;
+       u64 ret;
        struct timespec now;
 
        /*
@@ -402,7 +402,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
-       unsigned long slack = 0;
+       u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;
 
@@ -784,7 +784,7 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
-       unsigned long slack = 0;
+       u64 slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;
 
index d4d2591b77c83c644a49bc409e2575c401775e3b..b938b14f6041b97a8f8b2681016dcb08a8438ce6 100644 (file)
@@ -790,7 +790,7 @@ static void do_emergency_remount(struct work_struct *work)
        struct super_block *sb, *p = NULL;
 
        spin_lock(&sb_lock);
-       list_for_each_entry(sb, &super_blocks, s_list) {
+       list_for_each_entry_reverse(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
index 66cdb44616d5a0ba79a04f60de5fd835805b6e56..d473e6e07a7e66a05860347b578e0854c92e1805 100644 (file)
@@ -457,7 +457,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                                 new_flags, vma->anon_vma,
                                 vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX);
+                                NULL_VM_UFFD_CTX,
+                                vma_get_anon_name(vma));
                if (prev)
                        vma = prev;
                else
@@ -833,7 +834,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                ((struct vm_userfaultfd_ctx){ ctx }));
+                                ((struct vm_userfaultfd_ctx){ ctx }),
+                                vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
@@ -967,7 +969,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                prev = vma_merge(mm, prev, start, vma_end, new_flags,
                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
                                 vma_policy(vma),
-                                NULL_VM_UFFD_CTX);
+                                NULL_VM_UFFD_CTX,
+                                vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        goto next;
index 71581125e60402b26f217456f9638466181e19ca..a65eedc15e939fd5f4cbf729bfa2df5cf7ffc779 100644 (file)
        . = ALIGN(align);                                               \
        *(.data..init_task)
 
+/*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#endif
+
 /*
  * Read only Data
  */
        .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
                VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
-               *(.data..ro_after_init) /* Read only after init */      \
+               RO_AFTER_INIT_DATA      /* Read only after init */      \
                *(__vermagic)           /* Kernel version magic */      \
                . = ALIGN(8);                                           \
                VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;         \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644 (file)
index 0000000..a460889
--- /dev/null
@@ -0,0 +1,2 @@
+header-y += if_pppolac.h
+header-y += if_pppopns.h
index 8c98113069ce9264a2bdf2fe7ae10198ceed0b9d..eff56cb0016a7c2db44aeca2e83fcd33b9dbd971 100644 (file)
@@ -5,6 +5,15 @@
 #define AMBA_MMCI_H
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+        struct sdio_cis cis;
+        struct sdio_cccr cccr;
+        struct sdio_embedded_func *funcs;
+        int num_funcs;
+};
 
 /**
  * struct mmci_platform_data - platform configuration for the MMCI
@@ -31,6 +40,7 @@ struct mmci_platform_data {
        int     gpio_wp;
        int     gpio_cd;
        bool    cd_invert;
+       struct embedded_sdio_data *embedded_sdio;
 };
 
 #endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644 (file)
index 0000000..6f1fa17
--- /dev/null
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_OBSOLETE_000 KGIDT_INIT(3001)  /* was NET_BT_ADMIN */
+#define AID_OBSOLETE_001 KGIDT_INIT(3002)  /* was NET_BT */
+#define AID_INET         KGIDT_INIT(3003)
+#define AID_NET_RAW      KGIDT_INIT(3004)
+#define AID_NET_ADMIN    KGIDT_INIT(3005)
+#define AID_NET_BW_STATS KGIDT_INIT(3006)  /* read bandwidth statistics */
+#define AID_NET_BW_ACCT  KGIDT_INIT(3007)  /* change bandwidth statistics accounting */
+
+#endif
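
These AIDs are consulted by the Android "paranoid network" checks added
elsewhere in this series; the checks themselves are not in this header. A
hedged sketch of the typical test: example_current_may_create_socket() is a
hypothetical helper, while in_egroup_p() and capable() are stock kernel APIs.

#include <linux/android_aid.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/types.h>

/* Allow a socket-creating caller if it is in the AID_INET group or holds
 * CAP_NET_RAW (hypothetical helper, shown only to illustrate the AIDs). */
static inline bool example_current_may_create_socket(void)
{
	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
}
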
index fe14382f966451b0dd1448634788845fee6e282c..a9562bb029d00ad3dddd551ec35ef9f361c44872 100644 (file)
@@ -197,6 +197,9 @@ struct request {
 
        /* for bidi */
        struct request *next_rq;
+
+       ktime_t                 lat_hist_io_start;
+       int                     lat_hist_enabled;
 };
 
 static inline unsigned short req_get_ioprio(struct request *req)
@@ -1656,6 +1659,79 @@ extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                                                struct writeback_control *);
 extern long bdev_direct_access(struct block_device *, sector_t,
                void __pmem **addr, unsigned long *pfn, long size);
+
+/*
+ * X-axis for IO latency histogram support.
+ */
+static const u_int64_t latency_x_axis_us[] = {
+       100,
+       200,
+       300,
+       400,
+       500,
+       600,
+       700,
+       800,
+       900,
+       1000,
+       1200,
+       1400,
+       1600,
+       1800,
+       2000,
+       2500,
+       3000,
+       4000,
+       5000,
+       6000,
+       7000,
+       9000,
+       10000
+};
+
+#define BLK_IO_LAT_HIST_DISABLE         0
+#define BLK_IO_LAT_HIST_ENABLE          1
+#define BLK_IO_LAT_HIST_ZERO            2
+
+struct io_latency_state {
+       u_int64_t       latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
+       u_int64_t       latency_reads_elems;
+       u_int64_t       latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
+       u_int64_t       latency_writes_elems;
+};
+
+static inline void
+blk_update_latency_hist(struct io_latency_state *s,
+                       int read,
+                       u_int64_t delta_us)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
+               if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
+                       if (read)
+                               s->latency_y_axis_read[i]++;
+                       else
+                               s->latency_y_axis_write[i]++;
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(latency_x_axis_us)) {
+               /* Overflowed the histogram */
+               if (read)
+                       s->latency_y_axis_read[i]++;
+               else
+                       s->latency_y_axis_write[i]++;
+       }
+       if (read)
+               s->latency_reads_elems++;
+       else
+               s->latency_writes_elems++;
+}
+
+void blk_zero_latency_hist(struct io_latency_state *s);
+ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
+
 #else /* CONFIG_BLOCK */
 
 struct block_device;
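
blk_update_latency_hist() above drops each completed request into the first
bucket whose boundary is larger than its latency, with one extra slot for
overflow. A userspace sketch of the same bucketing, using a shortened,
hypothetical boundary table and made-up sample latencies:

#include <stdint.h>
#include <stdio.h>

static const uint64_t buckets_us[] = { 100, 200, 500, 1000, 5000, 10000 };
#define NBUCKETS (sizeof(buckets_us) / sizeof(buckets_us[0]))

int main(void)
{
	uint64_t hist[NBUCKETS + 1] = { 0 };            /* last slot = overflow */
	uint64_t samples_us[] = { 90, 450, 4999, 12000 };
	size_t i, j;

	for (i = 0; i < sizeof(samples_us) / sizeof(samples_us[0]); i++) {
		for (j = 0; j < NBUCKETS; j++) {
			if (samples_us[i] < buckets_us[j]) {
				hist[j]++;
				break;
			}
		}
		if (j == NBUCKETS)
			hist[j]++;                      /* overflowed the histogram */
	}

	for (j = 0; j <= NBUCKETS; j++)
		printf("bucket %zu: %llu\n", j, (unsigned long long)hist[j]);
	return 0;
}
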
index 1a96fdaa33d54befd4fde55b5af79c1c51b887ac..e133705d794a29e32a4862e433f67507d7b25a17 100644 (file)
@@ -26,6 +26,10 @@ SUBSYS(cpu)
 SUBSYS(cpuacct)
 #endif
 
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
 #if IS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(io)
 #endif
index d2ca8c38f9c45c0b279ef329c7abf130fcd0dc61..7c73824def83a9e7dda328a79485d830df375361 100644 (file)
@@ -290,4 +290,11 @@ bool cpu_wait_death(unsigned int cpu, int seconds);
 bool cpu_report_death(void);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
 #endif /* _LINUX_CPU_H_ */
index 177c7680c1a8a81bcc942497ee228c148fbf5a0b..60571292a8020178006db5f4ab6096728af99836 100644 (file)
@@ -160,6 +160,7 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
+bool cpufreq_driver_is_slow(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 #else
 static inline unsigned int cpufreq_get(unsigned int cpu)
@@ -317,6 +318,14 @@ struct cpufreq_driver {
  */
 #define CPUFREQ_NEED_INITIAL_FREQ_CHECK        (1 << 5)
 
+/*
+ * Indicates that it is safe to call cpufreq_driver_target from
+ * non-interruptible context in scheduler hot paths.  Drivers must
+ * opt in to this flag, as the safe default is that they might sleep
+ * or be too slow for hot path use.
+ */
+#define CPUFREQ_DRIVER_FAST            (1 << 6)
+
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 
@@ -487,6 +496,12 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_interactive)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
+extern struct cpufreq_governor cpufreq_gov_sched;
+#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_sched)
 #endif
 
 /*********************************************************************
@@ -616,4 +631,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
 int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency);
+
+struct sched_domain;
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long cpufreq_scale_max_freq_capacity(int cpu);
 #endif /* _LINUX_CPUFREQ_H */
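
A driver opts in to the new CPUFREQ_DRIVER_FAST flag through its
cpufreq_driver.flags field. A minimal sketch for a hypothetical driver:
example_cpu_init() and example_target_index() are placeholders, while
cpufreq_generic_frequency_table_verify() and cpufreq_generic_get() are
existing helpers.

#include <linux/cpufreq.h>

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* hypothetical: set up policy->freq_table, transition latency, etc. */
	return 0;
}

static int example_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* hypothetical: program the new frequency without sleeping */
	return 0;
}

static struct cpufreq_driver example_fast_cpufreq_driver = {
	.name		= "example-fast",
	/* target_index() neither sleeps nor takes slow firmware paths, so it
	 * is safe to call from scheduler hot paths. */
	.flags		= CPUFREQ_DRIVER_FAST | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= example_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= cpufreq_generic_get,
};
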
index 786ad32631a672695d88b47d67c1019df624acab..6eae1576499e06b421592236e6e2f3c9c550efb9 100644 (file)
@@ -204,7 +204,7 @@ static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 #endif
 
 /* kernel/sched/idle.c */
-extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void sched_idle_set_state(struct cpuidle_state *idle_state, int index);
 extern void default_idle_call(void);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
index 8d7151eb6cebc94d8b7197a64539803154058cdb..702b6c53c12ff5bcfcd0bb8d41340fb035edb625 100644 (file)
@@ -161,6 +161,7 @@ struct dentry_operations {
        struct vfsmount *(*d_automount)(struct path *);
        int (*d_manage)(struct dentry *, bool);
        struct inode *(*d_select_inode)(struct dentry *, unsigned);
+       void (*d_canonical_path)(const struct path *, struct path *);
        struct dentry *(*d_real)(struct dentry *, struct inode *);
 } ____cacheline_aligned;
 
index 899ab9f8549e2bd489f36db0c47c855e5c92e19d..b874d5b61ffce6eefcc9dfbaef4ab907dac3d735 100644 (file)
@@ -382,6 +382,12 @@ void dm_put(struct mapped_device *md);
 void dm_set_mdptr(struct mapped_device *md, void *ptr);
 void *dm_get_mdptr(struct mapped_device *md);
 
+/*
+ * Export the device via the ioctl interface (uses mdptr).
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+                   const char *uuid);
+
 /*
  * A device can still be used while suspended, but I/O is deferred.
  */
index bb522011383bca52ba248110a2a4c1e86d1f5aa3..ce4434814b8bce38d05f88e00675d9577359c166 100644 (file)
@@ -107,6 +107,7 @@ struct fence_cb {
  * @get_driver_name: returns the driver name.
  * @get_timeline_name: return the name of the context this fence belongs to.
  * @enable_signaling: enable software signaling of fence.
+ * @disable_signaling: disable software signaling of fence (optional).
  * @signaled: [optional] peek whether the fence is signaled, can be null.
  * @wait: custom wait implementation, or fence_default_wait.
  * @release: [optional] called on destruction of fence, can be null
@@ -166,6 +167,7 @@ struct fence_ops {
        const char * (*get_driver_name)(struct fence *fence);
        const char * (*get_timeline_name)(struct fence *fence);
        bool (*enable_signaling)(struct fence *fence);
+       void (*disable_signaling)(struct fence *fence);
        bool (*signaled)(struct fence *fence);
        signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
        void (*release)(struct fence *fence);
index 6b7fd9cf5ea2f282b4972c0245ba33f807c61f2f..dd03e837ebb7fa4c4dd5f86ada677595367d3dba 100644 (file)
@@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
  * call this with locks held.
  */
 static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
-               unsigned long delta, const enum hrtimer_mode mode)
+               u64 delta, const enum hrtimer_mode mode)
 {
        int __retval;
        freezer_do_not_count();
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644 (file)
index 0000000..2613fc5
--- /dev/null
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+       int count;
+       struct input_dev *dev[];
+};
+enum {
+       GPIO_EVENT_FUNC_UNINIT  = 0x0,
+       GPIO_EVENT_FUNC_INIT    = 0x1,
+       GPIO_EVENT_FUNC_SUSPEND = 0x2,
+       GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+       int (*func)(struct gpio_event_input_devs *input_devs,
+                   struct gpio_event_info *info,
+                   void **data, int func);
+       int (*event)(struct gpio_event_input_devs *input_devs,
+                    struct gpio_event_info *info,
+                    void **data, unsigned int dev, unsigned int type,
+                    unsigned int code, int value); /* out events */
+       bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+       const char *name;
+       struct gpio_event_info **info;
+       size_t info_count;
+       int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+       const char *names[]; /* If name is NULL, names contain a NULL */
+                            /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+       /* unset: drive active output low, set: drive active output high */
+       GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+       GPIOKPF_DEBOUNCE                 = 1U << 1,
+       GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+       GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+                                          GPIOKPF_DEBOUNCE,
+       GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+       GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+       GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+       GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+       GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+       (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+       /* initialize to gpio_event_matrix_func */
+       struct gpio_event_info info;
+       /* size must be ninputs * noutputs */
+       const unsigned short *keymap;
+       unsigned int *input_gpios;
+       unsigned int *output_gpios;
+       unsigned int ninputs;
+       unsigned int noutputs;
+       /* time to wait before reading inputs after driving each output */
+       ktime_t settle_time;
+       /* time to wait before scanning the keypad a second time */
+       ktime_t debounce_delay;
+       ktime_t poll_time;
+       unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+       GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*     GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*     GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+       GPIOEDF_PRINT_KEYS          = 1U << 8,
+       GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+       GPIOEDF_PRINT_KEY_UNSTABLE  = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+       uint32_t gpio:16;
+       uint32_t code:10;
+       uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+       /* initialize to gpio_event_input_func */
+       struct gpio_event_info info;
+       ktime_t debounce_time;
+       ktime_t poll_time;
+       uint16_t flags;
+       uint16_t type;
+       const struct gpio_event_direct_entry *keymap;
+       size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data,
+                       unsigned int dev, unsigned int type,
+                       unsigned int code, int value);
+struct gpio_event_output_info {
+       /* initialize to gpio_event_output_func and gpio_event_output_event */
+       struct gpio_event_info info;
+       uint16_t flags;
+       uint16_t type;
+       const struct gpio_event_direct_entry *keymap;
+       size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+       GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+       GPIOEAF_PRINT_RAW                = 1U << 17,
+       GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+       /* initialize to gpio_event_axis_func */
+       struct gpio_event_info info;
+       uint8_t  count; /* number of gpios for this axis */
+       uint8_t  dev; /* device index when using multiple input devices */
+       uint8_t  type; /* EV_REL or EV_ABS */
+       uint16_t code;
+       uint16_t decoded_size;
+       uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+       uint32_t *gpio;
+       uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+                       struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+                       struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
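
The structures above are typically filled in by board code and handed to the
"gpio-event" platform device (GPIO_EVENT_DEV_NAME). A hedged sketch of a 2x2
keypad matrix; GPIO numbers, key codes and timings are hypothetical, and the
keymap carries ninputs * noutputs entries as the comment above requires.

#include <linux/gpio_event.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

static unsigned int example_out_gpios[] = { 16, 17 };   /* driven rows */
static unsigned int example_in_gpios[]  = { 32, 33 };   /* sensed columns */

static const unsigned short example_keymap[] = {        /* one entry per out/in pair */
	KEY_VOLUMEUP, KEY_VOLUMEDOWN,
	KEY_HOME,     KEY_POWER,
};

static struct gpio_event_matrix_info example_matrix_info = {
	.info.func	= gpio_event_matrix_func,
	.keymap		= example_keymap,
	.output_gpios	= example_out_gpios,
	.input_gpios	= example_in_gpios,
	.noutputs	= ARRAY_SIZE(example_out_gpios),
	.ninputs	= ARRAY_SIZE(example_in_gpios),
	.settle_time.tv64	= 40 * NSEC_PER_USEC,
	.debounce_delay.tv64	= 10 * NSEC_PER_MSEC,
	.flags		= GPIOKPF_DEBOUNCE | GPIOKPF_REMOVE_PHANTOM_KEYS,
};

static struct gpio_event_info *example_keypad_info[] = {
	&example_matrix_info.info,
};

static struct gpio_event_platform_data example_keypad_pdata = {
	.name		= "example-keypad",
	.info		= example_keypad_info,
	.info_count	= ARRAY_SIZE(example_keypad_info),
};
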
index 2ead22dd74a00896d24fdb937242e8ba36a77cfe..c98c6539e2c2d40cf6a134b7fb33c50accc90d76 100644 (file)
@@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
        timer->node.expires = ktime_add_safe(time, delta);
 }
 
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
 {
        timer->_softexpires = time;
        timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 
 /* Basic timer operations: */
 extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                       unsigned long range_ns, const enum hrtimer_mode mode);
+                                  u64 range_ns, const enum hrtimer_mode mode);
 
 /**
  * hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 static inline void hrtimer_start_expires(struct hrtimer *timer,
                                         enum hrtimer_mode mode)
 {
-       unsigned long delta;
+       u64 delta;
        ktime_t soft, hard;
        soft = hrtimer_get_softexpires(timer);
        hard = hrtimer_get_expires(timer);
@@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
                                 struct task_struct *tsk);
 
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
                                                const enum hrtimer_mode mode);
 extern int schedule_hrtimeout_range_clock(ktime_t *expires,
-               unsigned long delta, const enum hrtimer_mode mode, int clock);
+                                         u64 delta,
+                                         const enum hrtimer_mode mode,
+                                         int clock);
 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644 (file)
index 0000000..e40aa10
--- /dev/null
@@ -0,0 +1,23 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <uapi/linux/if_pppolac.h>
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644 (file)
index 0000000..4ac621a
--- /dev/null
@@ -0,0 +1,23 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <uapi/linux/if_pppopns.h>
+
+#endif /* __LINUX_IF_PPPOPNS_H */
index b49cf923becc2405b3d99ef6f88e5a524423e7a3..63828a5870f1cec1d39939bf237f24b2750320b1 100644 (file)
@@ -43,6 +43,25 @@ struct pptp_opt {
        u32 seq_sent, seq_recv;
        int ppp_flags;
 };
+
+struct pppolac_opt {
+       __u32           local;
+       __u32           remote;
+       __u32           recv_sequence;
+       __u32           xmit_sequence;
+       atomic_t        sequencing;
+       int             (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+       __u16           local;
+       __u16           remote;
+       __u32           recv_sequence;
+       __u32           xmit_sequence;
+       void            (*data_ready)(struct sock *sk_raw);
+       int             (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -53,6 +72,8 @@ struct pppox_sock {
        union {
                struct pppoe_opt pppoe;
                struct pptp_opt  pptp;
+               struct pppolac_opt lac;
+               struct pppopns_opt pns;
        } proto;
        __be16                  num;
 };
index 0e707f0c1a3ed1e747bf11ec9477fbae95468bff..795852dc343407520b0a273ac7cde8891d5b310b 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <uapi/linux/inet_diag.h>
 
+struct net;
 struct sock;
 struct inet_hashinfo;
 struct nlattr;
@@ -23,6 +24,10 @@ struct inet_diag_handler {
        void            (*idiag_get_info)(struct sock *sk,
                                          struct inet_diag_msg *r,
                                          void *info);
+
+       int             (*destroy)(struct sk_buff *in_skb,
+                                  const struct inet_diag_req_v2 *req);
+
        __u16           idiag_type;
        __u16           idiag_info_size;
 };
@@ -32,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
                      struct user_namespace *user_ns,
                      u32 pid, u32 seq, u16 nlmsg_flags,
-                     const struct nlmsghdr *unlh);
+                     const struct nlmsghdr *unlh, bool net_admin);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
                         struct netlink_callback *cb,
                         const struct inet_diag_req_v2 *r,
@@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
                            struct sk_buff *in_skb, const struct nlmsghdr *nlh,
                            const struct inet_diag_req_v2 *req);
 
+struct sock *inet_diag_find_one_icsk(struct net *net,
+                                    struct inet_hashinfo *hashinfo,
+                                    const struct inet_diag_req_v2 *req);
+
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
diff --git a/include/linux/initramfs.h b/include/linux/initramfs.h
new file mode 100644 (file)
index 0000000..fc7da63
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * include/linux/initramfs.h
+ *
+ * Copyright (C) 2015, Google
+ * Rom Lemarchand <romlem@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_INITRAMFS_H
+#define _LINUX_INITRAMFS_H
+
+#include <linux/kconfig.h>
+
+#if IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+
+int __init default_rootfs(void);
+
+#endif
+
+#endif /* _LINUX_INITRAMFS_H */
index 402753bccafa37b4ec1e597902f608548fbdbd22..ce777260e9ea73f2d18dd538ef6bf57ee08e063b 100644 (file)
@@ -39,6 +39,7 @@ struct ipv6_devconf {
        __s32           accept_ra_rt_info_max_plen;
 #endif
 #endif
+       __s32           accept_ra_rt_table;
        __s32           proxy_ndp;
        __s32           accept_source_route;
        __s32           accept_ra_from_local;
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644 (file)
index 0000000..08cf540
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <uapi/linux/keychord.h>
+
+#endif /* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/keycombo.h b/include/linux/keycombo.h
new file mode 100644 (file)
index 0000000..c6db262
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * include/linux/keycombo.h - platform data structure for keycombo driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYCOMBO_H
+#define _LINUX_KEYCOMBO_H
+
+#define KEYCOMBO_NAME "keycombo"
+
+/*
+ * if key_down_fn and key_up_fn are both present, you are guaranteed that
+ * key_down_fn will return before key_up_fn is called, and that key_up_fn
+ * is called iff key_down_fn is called.
+ */
+struct keycombo_platform_data {
+       void (*key_down_fn)(void *);
+       void (*key_up_fn)(void *);
+       void *priv;
+       int key_down_delay; /* Time in ms */
+       int *keys_up;
+       int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYCOMBO_H */
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644 (file)
index 0000000..2e34afa
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+       int (*reset_fn)(void);
+       int key_down_delay;
+       int *keys_up;
+       int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
index 24daf8fc4d7c71e4c36ed0063849c01a3b31d022..fec66f86eeffedbcfd8c86d6ec8f313b6859932c 100644 (file)
@@ -25,6 +25,7 @@ enum {
        MEMBLOCK_NONE           = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
+       MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
 };
 
 struct memblock_region {
@@ -82,6 +83,7 @@ bool memblock_overlaps_region(struct memblock_type *type,
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 ulong choose_memblock_flags(void);
 
 /* Low level functions */
@@ -184,6 +186,11 @@ static inline bool memblock_is_mirror(struct memblock_region *m)
        return m->flags & MEMBLOCK_MIRROR;
 }
 
+static inline bool memblock_is_nomap(struct memblock_region *m)
+{
+       return m->flags & MEMBLOCK_NOMAP;
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long  *end_pfn);
@@ -319,6 +326,7 @@ phys_addr_t memblock_start_of_DRAM(void);
 phys_addr_t memblock_end_of_DRAM(void);
 void memblock_enforce_memory_limit(phys_addr_t memory_limit);
 int memblock_is_memory(phys_addr_t addr);
+int memblock_is_map_memory(phys_addr_t addr);
 int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
 int memblock_is_reserved(phys_addr_t addr);
 bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
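
MEMBLOCK_NOMAP lets early platform code keep a region visible to memblock
while excluding it from the kernel's direct (linear) mapping. A hedged sketch
of the intended use; the function names and the region are hypothetical, the
memblock calls are the real API.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/types.h>

/* Early arch/firmware setup: account for a firmware-owned region but keep it
 * out of the linear map. */
static void __init example_reserve_fw_region(phys_addr_t base, phys_addr_t size)
{
	memblock_add(base, size);
	memblock_mark_nomap(base, size);
}

/* Later code can check whether an address may be touched via the linear map. */
static bool example_addr_is_mapped(phys_addr_t addr)
{
	return memblock_is_map_memory(addr);
}
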
diff --git a/include/linux/memory-state-time.h b/include/linux/memory-state-time.h
new file mode 100644 (file)
index 0000000..d2212b0
--- /dev/null
@@ -0,0 +1,42 @@
+/* include/linux/memory-state-time.h
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+
+#define UPDATE_MEMORY_STATE(BLOCK, VALUE) BLOCK->update_call(BLOCK, VALUE)
+
+struct memory_state_update_block;
+
+typedef void (*memory_state_update_fn_t)(struct memory_state_update_block *ub,
+               int value);
+
+/* This struct is populated when you pass it to a memory_state_register*
+ * function. The update_call function is used to deliver an update and is
+ * defined by the typedef memory_state_update_fn_t.
+ */
+struct memory_state_update_block {
+       memory_state_update_fn_t update_call;
+       int id;
+};
+
+/* Register a frequency struct memory_state_update_block to provide updates to
+ * memory_state_time about frequency changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_frequency_source(void);
+
+/* Register a bandwidth struct memory_state_update_block to provide updates to
+ * memory_state_time about bandwidth changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_bandwidth_source(void);
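
A client registers one of the update blocks above and then reports state
changes through its update_call. A hedged sketch of a frequency source; the
driver hooks are hypothetical and the meaning of the reported value is
defined by the memory_state_time driver's device-tree tables.

#include <linux/errno.h>
#include <linux/memory-state-time.h>

static struct memory_state_update_block *example_freq_src;

static int example_register_freq_source(void)     /* e.g. from a probe() */
{
	example_freq_src = memory_state_register_frequency_source();
	return example_freq_src ? 0 : -ENOMEM;
}

/* Called whenever the memory frequency changes. */
static void example_on_freq_change(int new_state)
{
	UPDATE_MEMORY_STATE(example_freq_src, new_state);
}
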
index f0ffa01c90d9b873a0ae2cbb9e23459d755c77c7..f8a729751faa41bd76a088e14a5936dbbf625abc 100644 (file)
@@ -51,6 +51,17 @@ extern int sysctl_legacy_va_layout;
 #define sysctl_legacy_va_layout 0
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+extern const int mmap_rnd_bits_min;
+extern const int mmap_rnd_bits_max;
+extern int mmap_rnd_bits __read_mostly;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+extern const int mmap_rnd_compat_bits_min;
+extern const int mmap_rnd_compat_bits_max;
+extern int mmap_rnd_compat_bits __read_mostly;
+#endif
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
@@ -1059,6 +1070,7 @@ extern void pagefault_out_of_memory(void);
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 #ifdef CONFIG_SHMEM
 bool shmem_mapping(struct address_space *mapping);
@@ -1866,7 +1878,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-       struct mempolicy *, struct vm_userfaultfd_ctx);
+       struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
index f8d1492a114f4447d0d7f4dfd8136e854a4a7581..0a732c5e0de1a0a880ae10048566df7a0af9ae50 100644 (file)
@@ -323,11 +323,18 @@ struct vm_area_struct {
        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
+        *
+        * For private anonymous mappings, a pointer to a null terminated string
+        * in the user process containing the name given to the vma, or NULL
+        * if unnamed.
         */
-       struct {
-               struct rb_node rb;
-               unsigned long rb_subtree_last;
-       } shared;
+       union {
+               struct {
+                       struct rb_node rb;
+                       unsigned long rb_subtree_last;
+               } shared;
+               const char __user *anon_name;
+       };
 
        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -591,4 +598,13 @@ typedef struct {
        unsigned long val;
 } swp_entry_t;
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_file)
+               return NULL;
+
+       return vma->anon_name;
+}
+
 #endif /* _LINUX_MM_TYPES_H */
index 37967b6da03cf542d7a5762342c2089d93054b7a..0860efd6e1bedd32710f0528ee2f888111e1f528 100644 (file)
@@ -136,6 +136,10 @@ struct mmc_request {
        struct completion       completion;
        void                    (*done)(struct mmc_request *);/* completion function */
        struct mmc_host         *host;
+       ktime_t                 io_start;
+#ifdef CONFIG_BLOCK
+       int                     lat_hist_enabled;
+#endif
 };
 
 struct mmc_card;
index 8673ffe3d86ef83fc657cde31a43058b94840f61..97b2b0b1f99de03af7c6c7f56afc554fcca4aefd 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/device.h>
 #include <linux/fault-inject.h>
+#include <linux/blkdev.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
@@ -370,6 +371,20 @@ struct mmc_host {
        int                     dsr_req;        /* DSR value is valid */
        u32                     dsr;    /* optional driver stage (DSR) value */
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       struct {
+               struct sdio_cis                 *cis;
+               struct sdio_cccr                *cccr;
+               struct sdio_embedded_func       *funcs;
+               int                             num_funcs;
+       } embedded_sdio_data;
+#endif
+
+#ifdef CONFIG_BLOCK
+       int                     latency_hist_enabled;
+       struct io_latency_state io_lat_s;
+#endif
+
        unsigned long           private[0] ____cacheline_aligned;
 };
 
@@ -379,6 +394,14 @@ void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
 int mmc_of_parse(struct mmc_host *host);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+                                      struct sdio_cis *cis,
+                                      struct sdio_cccr *cccr,
+                                      struct sdio_embedded_func *funcs,
+                                      int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
        return (void *)host->private;
index 4a139204c20c0bb8aab7a7759c74e7e5d5cde9a4..6e2d6a135c7e0d75f830af3a429fb5bc02ca07b2 100644 (file)
@@ -26,5 +26,6 @@ typedef unsigned int mmc_pm_flag_t;
 
 #define MMC_PM_KEEP_POWER      (1 << 0)        /* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ   (1 << 1)        /* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY        (1 << 2)        /* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
index aab032a6ae6124de63b7934f49d0dc564b0d2dc3..d0a69e71b8abac1c164229af896b44f805e25047 100644 (file)
@@ -22,6 +22,14 @@ struct sdio_func;
 
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
+/*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+       uint8_t f_class;
+       uint32_t f_maxblksize;
+};
+
 /*
  * SDIO function CIS tuple (unknown to the core)
  */
@@ -128,6 +136,8 @@ extern int sdio_release_irq(struct sdio_func *func);
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+       unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
index d8c6334cd15005c16162f57959e20b2e09f99d03..d53c25453aca084e9b0390473323ac9897961167 100644 (file)
@@ -75,6 +75,8 @@ extern struct dentry *user_path_create(int, const char __user *, struct path *,
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
 extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+               const char *, unsigned int, struct path *);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644 (file)
index 0000000..ca60fbd
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644 (file)
index 0000000..eadc690
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+       XT_QUOTA_INVERT    = 1 << 0,
+       XT_QUOTA_GROW      = 1 << 1,
+       XT_QUOTA_PACKET    = 1 << 2,
+       XT_QUOTA_NO_CHANGE = 1 << 3,
+       XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+       char name[15];
+       u_int8_t flags;
+
+       /* Comparison-invariant */
+       aligned_u64 quota;
+
+       /* Used internally by the kernel */
+       struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
index 7ec5b86735f33d512a27086635ac15a221039101..74385351935d4cb20bfceeebd7eff30ed1f6bff7 100644 (file)
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
 #include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
index df9ef380181285a5b196b18a925942346c836d63..fbbb89b690123c5bf61c10971c3f393d200b84e2 100644 (file)
@@ -59,6 +59,27 @@ extern int of_flat_dt_match(unsigned long node, const char *const *matches);
 extern unsigned long of_get_flat_dt_root(void);
 extern int of_get_flat_dt_size(void);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=non-empty:
+ *     dt bootargs
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
                                     int depth, void *data);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
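
The decision table documented above reduces, for CONFIG_CMDLINE_EXTEND, to
"start from the built-in command line (or whatever non-empty string @data
already held) and append the device-tree bootargs". A userspace sketch of
just that rule, with hypothetical strings:

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 1024
#define CONFIG_CMDLINE "console=ttyS0,115200"             /* hypothetical built-in */

int main(void)
{
	char data[COMMAND_LINE_SIZE] = "";                  /* as passed by the caller */
	const char *dt_bootargs = "root=/dev/mmcblk0p2 rw"; /* from /chosen/bootargs */
	size_t len;

	if (data[0] == '\0')              /* empty: seed with CONFIG_CMDLINE */
		snprintf(data, sizeof(data), "%s", CONFIG_CMDLINE);

	len = strlen(data);               /* dt bootargs are appended even if empty */
	snprintf(data + len, sizeof(data) - len, " %s", dt_bootargs);

	printf("%s\n", data);  /* console=ttyS0,115200 root=/dev/mmcblk0p2 rw */
	return 0;
}
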
index c2fa3ecb0dce57dde9ad4a92a35dc4178d7a159e..146efefde2a157008fce62fcdaa376f13e2c7682 100644 (file)
 
 struct percpu_rw_semaphore {
        struct rcu_sync         rss;
-       unsigned int __percpu   *fast_read_ctr;
+       unsigned int __percpu   *read_count;
        struct rw_semaphore     rw_sem;
-       atomic_t                slow_read_ctr;
-       wait_queue_head_t       write_waitq;
+       wait_queue_head_t       writer;
+       int                     readers_block;
 };
 
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int  percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+       might_sleep();
+
+       rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+       preempt_disable();
+       /*
+        * We are in an RCU-sched read-side critical section, so the writer
+        * cannot both change sem->state from readers_fast and start checking
+        * counters while we are here. So if we see !sem->state, we know that
+        * the writer won't be checking until we're past the preempt_enable()
+        * and that once the synchronize_sched() is done, the writer will see
+        * anything we did within this RCU-sched read-side critical section.
+        */
+       __this_cpu_inc(*sem->read_count);
+       if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+               __percpu_down_read(sem, false); /* Unconditional memory barrier */
+       preempt_enable();
+       /*
+        * The barrier() from preempt_enable() prevents the compiler from
+        * bleeding the critical section out.
+        */
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+       int ret = 1;
+
+       preempt_disable();
+       /*
+        * Same as in percpu_down_read().
+        */
+       __this_cpu_inc(*sem->read_count);
+       if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+               ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+       preempt_enable();
+       /*
+        * The barrier() from preempt_enable() prevents the compiler from
+        * bleeding the critical section out.
+        */
+
+       if (ret)
+               rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+       return ret;
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+       /*
+        * The barrier() in preempt_disable() prevents the compiler from
+        * bleeding the critical section out.
+        */
+       preempt_disable();
+       /*
+        * Same as in percpu_down_read().
+        */
+       if (likely(rcu_sync_is_idle(&sem->rss)))
+               __this_cpu_dec(*sem->read_count);
+       else
+               __percpu_up_read(sem); /* Unconditional memory barrier */
+       preempt_enable();
+
+       rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
 
 extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
                                const char *, struct lock_class_key *);
+
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-#define percpu_init_rwsem(brw) \
+#define percpu_init_rwsem(sem)                                 \
 ({                                                             \
        static struct lock_class_key rwsem_key;                 \
-       __percpu_init_rwsem(brw, #brw, &rwsem_key);             \
+       __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
 })
 
-
 #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
 
 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
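For context, a minimal sketch of how a reader/writer pair would use the API declared above; the semaphore name and the surrounding init/reader/writer functions are hypothetical, only the percpu_rwsem calls come from this header:

	#include <linux/percpu-rwsem.h>

	static struct percpu_rw_semaphore hypothetical_sem;	/* illustrative only */

	static int hypothetical_module_init(void)
	{
		return percpu_init_rwsem(&hypothetical_sem);
	}

	static void hypothetical_reader(void)
	{
		percpu_down_read(&hypothetical_sem);	/* fast path: per-CPU increment */
		/* read shared state here */
		percpu_up_read(&hypothetical_sem);
	}

	static void hypothetical_writer(void)
	{
		percpu_down_write(&hypothetical_sem);	/* forces readers onto rw_sem */
		/* mutate shared state here */
		percpu_up_write(&hypothetical_sem);
	}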
index a288010667dcf561f1019a6d28ade8ead8673787..9dcb00ca2ec1ffd9c3672a3f7110669c76795e89 100644 (file)
@@ -999,6 +999,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                loff_t *ppos);
 
 
+static inline bool perf_paranoid_any(void)
+{
+       return sysctl_perf_event_paranoid > 2;
+}
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
        return sysctl_perf_event_paranoid > -1;
diff --git a/include/linux/platform_data/ds2482.h b/include/linux/platform_data/ds2482.h
new file mode 100644 (file)
index 0000000..5a6879e
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PLATFORM_DATA_DS2482__
+#define __PLATFORM_DATA_DS2482__
+
+struct ds2482_platform_data {
+       int             slpz_gpio;
+};
+
+#endif /* __PLATFORM_DATA_DS2482__ */
index 528be6787796b52a405fbc0af8bf707c31d55192..6a5d654f444726abc824d9d07377ad66e86c6a44 100644 (file)
@@ -573,6 +573,7 @@ struct dev_pm_info {
        struct wakeup_source    *wakeup;
        bool                    wakeup_path:1;
        bool                    syscore:1;
+       bool                    no_pm_callbacks:1;      /* Owned by the PM core */
 #else
        unsigned int            should_wakeup:1;
 #endif
index c08386fb3e0845933f074bda2117734c128075b3..9fb4f40d9a26ed6c964990e896a3c733c5f00185 100644 (file)
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                                 ktime_t *expires, unsigned long slack);
-extern long select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec *tv);
 
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
index ef9f1592185d3189867b564efc4fdb8c151597a7..1c075892c6fdfb10518a52521c23e8a4e5f130b0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/leds.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/types.h>
 
 /*
  * All voltages, currents, charges, energies, time and temperatures in uV,
@@ -148,6 +149,12 @@ enum power_supply_property {
        POWER_SUPPLY_PROP_SCOPE,
        POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
        POWER_SUPPLY_PROP_CALIBRATE,
+       /* Local extensions */
+       POWER_SUPPLY_PROP_USB_HC,
+       POWER_SUPPLY_PROP_USB_OTG,
+       POWER_SUPPLY_PROP_CHARGE_ENABLED,
+       /* Local extensions of type int64_t */
+       POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
        /* Properties of type `const char *' */
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_MANUFACTURER,
@@ -172,6 +179,7 @@ enum power_supply_notifier_events {
 union power_supply_propval {
        int intval;
        const char *strval;
+       int64_t int64val;
 };
 
 struct device_node;
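As a rough illustration of why the int64_t member is added to the propval union: a driver reporting the local POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT extension fills int64val rather than intval. The driver callback below is a hypothetical sketch; the literal value stands in for a driver-specific accumulated-charge reading:

	static int example_bat_get_property(struct power_supply *psy,
					    enum power_supply_property psp,
					    union power_supply_propval *val)
	{
		switch (psp) {
		case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT:
			/* 64-bit local extension: use int64val, not intval */
			val->int64val = 123456789LL;	/* placeholder reading */
			return 0;
		case POWER_SUPPLY_PROP_CHARGE_ENABLED:
			val->intval = 1;		/* ordinary int property */
			return 0;
		default:
			return -EINVAL;
		}
	}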
index 831479f8df8f1b70638d59edc3cde6fc6fdcc22f..5cae2c6c90ad3b6b2a3f0af1b729596c41b5fec3 100644 (file)
 #ifndef _LINUX_PSTORE_H
 #define _LINUX_PSTORE_H
 
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
 #include <linux/kmsg_dump.h>
 #include <linux/mutex.h>
-#include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
 
 /* types */
 enum pstore_type_id {
@@ -67,6 +68,10 @@ struct pstore_info {
                        enum kmsg_dump_reason reason, u64 *id,
                        unsigned int part, const char *buf, bool compressed,
                        size_t size, struct pstore_info *psi);
+       int             (*write_buf_user)(enum pstore_type_id type,
+                       enum kmsg_dump_reason reason, u64 *id,
+                       unsigned int part, const char __user *buf,
+                       bool compressed, size_t size, struct pstore_info *psi);
        int             (*erase)(enum pstore_type_id type, u64 id,
                        int count, struct timespec time,
                        struct pstore_info *psi);
index 9c9d6c154c8e926207856a749e2948a7d7346b69..45ac5a0d29eefa85b4928c4c9dbe654dcfd047ca 100644 (file)
 #ifndef __LINUX_PSTORE_RAM_H__
 #define __LINUX_PSTORE_RAM_H__
 
+#include <linux/compiler.h>
 #include <linux/device.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/types.h>
-#include <linux/init.h>
 
 struct persistent_ram_buffer;
 struct rs_control;
@@ -59,7 +60,9 @@ void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
 int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
-       unsigned int count);
+                        unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+                             const void __user *s, unsigned int count);
 
 void persistent_ram_save_old(struct persistent_ram_zone *prz);
 size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
@@ -68,6 +71,8 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz);
 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
        char *str, size_t len);
 
+void ramoops_console_write_buf(const char *buf, size_t size);
+
 /*
  * Ramoops platform data
  * @mem_size   memory size for ramoops
index a75840c1aa71414acc43468da6cc8f83ec06c1b8..9c29122037f95283cc02b8af995975e80e059abd 100644 (file)
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
 #endif
 
 unsigned int get_random_int(void);
+unsigned long get_random_long(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
 u32 prandom_u32(void);
index a63a33e6196e23905406e2dab831b17b5d883597..ece7ed9a4a7054abf6649489d53016117698fac9 100644 (file)
@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 }
 
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 extern void rcu_sync_dtor(struct rcu_sync *);
index 1c0193baea2a57a53ed41a650df3717f4a0148ba..ede29e8db82ddaa03698dd9a778124b935e204fd 100644 (file)
@@ -173,6 +173,9 @@ extern bool single_task_running(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+#ifdef CONFIG_CPU_QUIET
+extern u64 nr_running_integral(unsigned int cpu);
+#endif
 
 extern void calc_global_load(unsigned long ticks);
 
@@ -314,6 +317,15 @@ extern char ___assert_task_state[1 - 2*!!(
 /* Task command name length */
 #define TASK_COMM_LEN 16
 
+enum task_event {
+       PUT_PREV_TASK   = 0,
+       PICK_NEXT_TASK  = 1,
+       TASK_WAKE       = 2,
+       TASK_MIGRATE    = 3,
+       TASK_UPDATE     = 4,
+       IRQ_UPDATE      = 5,
+};
+
 #include <linux/spinlock.h>
 
 /*
@@ -929,6 +941,14 @@ enum cpu_idle_type {
 #define SCHED_CAPACITY_SHIFT   10
 #define SCHED_CAPACITY_SCALE   (1L << SCHED_CAPACITY_SHIFT)
 
+struct sched_capacity_reqs {
+       unsigned long cfs;
+       unsigned long rt;
+       unsigned long dl;
+
+       unsigned long total;
+};
+
 /*
  * Wake-queues are lists of tasks with a pending wakeup, whose
  * callers have already marked the task as woken internally,
@@ -991,6 +1011,7 @@ extern void wake_up_q(struct wake_q_head *head);
 #define SD_PREFER_SIBLING      0x1000  /* Prefer to place tasks in a sibling domain */
 #define SD_OVERLAP             0x2000  /* sched_domains of this level overlap */
 #define SD_NUMA                        0x4000  /* cross-node balancing */
+#define SD_SHARE_CAP_STATES    0x8000  /* Domain members share capacity state */
 
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
@@ -1023,6 +1044,24 @@ struct sched_domain_attr {
 
 extern int sched_domain_level_max;
 
+struct capacity_state {
+       unsigned long cap;      /* compute capacity */
+       unsigned long power;    /* power consumption at this compute capacity */
+};
+
+struct idle_state {
+       unsigned long power;     /* power consumption in this idle state */
+};
+
+struct sched_group_energy {
+       unsigned int nr_idle_states;    /* number of idle states */
+       struct idle_state *idle_states; /* ptr to idle state array */
+       unsigned int nr_cap_states;     /* number of capacity states */
+       struct capacity_state *cap_states; /* ptr to capacity state array */
+};
+
+unsigned long capacity_curr_of(int cpu);
+
 struct sched_group;
 
 struct sched_domain {
@@ -1121,6 +1160,8 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
+typedef
+const struct sched_group_energy * const(*sched_domain_energy_f)(int cpu);
 
 #define SDTL_OVERLAP   0x01
 
@@ -1133,6 +1174,7 @@ struct sd_data {
 struct sched_domain_topology_level {
        sched_domain_mask_f mask;
        sched_domain_flags_f sd_flags;
+       sched_domain_energy_f energy;
        int                 flags;
        int                 numa_level;
        struct sd_data      data;
@@ -1243,6 +1285,41 @@ struct sched_statistics {
 };
 #endif
 
+#ifdef CONFIG_SCHED_WALT
+#define RAVG_HIST_SIZE_MAX  5
+
+/* ravg represents frequency scaled cpu-demand of tasks */
+struct ravg {
+       /*
+        * 'mark_start' marks the beginning of an event (task waking up, task
+        * starting to execute, task being preempted) within a window
+        *
+        * 'sum' represents how runnable a task has been within current
+        * window. It incorporates both running time and wait time and is
+        * frequency scaled.
+        *
+        * 'sum_history' keeps track of history of 'sum' seen over previous
+        * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
+        * ignored.
+        *
+        * 'demand' represents maximum sum seen over previous
+        * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
+        * demand for tasks.
+        *
+        * 'curr_window' represents task's contribution to cpu busy time
+        * statistics (rq->curr_runnable_sum) in current window
+        *
+        * 'prev_window' represents task's contribution to cpu busy time
+        * statistics (rq->prev_runnable_sum) in previous window
+        */
+       u64 mark_start;
+       u32 sum, demand;
+       u32 sum_history[RAVG_HIST_SIZE_MAX];
+       u32 curr_window, prev_window;
+       u16 active_windows;
+};
+#endif
+
 struct sched_entity {
        struct load_weight      load;           /* for load-balancing */
        struct rb_node          run_node;
@@ -1400,6 +1477,15 @@ struct task_struct {
        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_WALT
+       struct ravg ravg;
+       /*
+        * 'init_load_pct' represents the initial task load assigned to children
+        * of this task
+        */
+       u32 init_load_pct;
+#endif
+
 #ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
 #endif
@@ -1769,8 +1855,8 @@ struct task_struct {
         * time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
-       unsigned long timer_slack_ns;
-       unsigned long default_timer_slack_ns;
+       u64 timer_slack_ns;
+       u64 default_timer_slack_ns;
 
 #ifdef CONFIG_KASAN
        unsigned int kasan_depth;
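To make the capacity_state/idle_state layout above concrete, here is a hypothetical per-core energy model table; the numbers are invented and real values would come from the platform's sched-energy-costs device-tree data:

	static struct idle_state example_core_idle[] = {
		{ .power = 10 },	/* WFI */
		{ .power =  0 },	/* core off */
	};

	static struct capacity_state example_core_cap[] = {
		{ .cap =  235, .power =  33 },	/* lowest OPP */
		{ .cap =  446, .power =  90 },
		{ .cap = 1024, .power = 616 },	/* highest OPP */
	};

	static struct sched_group_energy example_core_energy = {
		.nr_idle_states	= ARRAY_SIZE(example_core_idle),
		.idle_states	= example_core_idle,
		.nr_cap_states	= ARRAY_SIZE(example_core_cap),
		.cap_states	= example_core_cap,
	};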
index c9e4731cf10b8e97956b160c503e447490991931..d68e88c9d4d7032943e65c519aaf401f4bbe16b7 100644 (file)
@@ -39,6 +39,16 @@ extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_is_big_little;
+extern unsigned int sysctl_sched_sync_hint_enable;
+extern unsigned int sysctl_sched_initial_task_util;
+extern unsigned int sysctl_sched_cstate_aware;
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int sysctl_sched_walt_init_task_load_pct;
+extern unsigned int sysctl_sched_walt_cpu_high_irqload;
+#endif
 
 enum sched_tunable_scaling {
        SCHED_TUNABLESCALING_NONE,
@@ -77,6 +87,22 @@ extern int sysctl_sched_rt_runtime;
 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 #endif
 
+#ifdef CONFIG_SCHED_TUNE
+extern unsigned int sysctl_sched_cfs_boost;
+int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+                                  void __user *buffer, size_t *length,
+                                  loff_t *ppos);
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+       return sysctl_sched_cfs_boost;
+}
+#else
+static inline unsigned int get_sysctl_sched_cfs_boost(void)
+{
+       return 0;
+}
+#endif
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
 #endif
diff --git a/include/linux/sched_energy.h b/include/linux/sched_energy.h
new file mode 100644 (file)
index 0000000..1daf3e1
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef _LINUX_SCHED_ENERGY_H
+#define _LINUX_SCHED_ENERGY_H
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+/*
+ * There doesn't seem to be an NR_CPUS style max number of sched domain
+ * levels so here's an arbitrary constant one for the moment.
+ *
+ * The levels alluded to here correspond to entries in struct
+ * sched_domain_topology_level that are meant to be populated by arch
+ * specific code (topology.c).
+ */
+#define NR_SD_LEVELS 8
+
+#define SD_LEVEL0   0
+#define SD_LEVEL1   1
+#define SD_LEVEL2   2
+#define SD_LEVEL3   3
+#define SD_LEVEL4   4
+#define SD_LEVEL5   5
+#define SD_LEVEL6   6
+#define SD_LEVEL7   7
+
+/*
+ * Convenience macro for iterating through said sd levels.
+ */
+#define for_each_possible_sd_level(level)                  \
+       for (level = 0; level < NR_SD_LEVELS; level++)
+
+#ifdef CONFIG_SMP
+
+extern struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+
+void init_sched_energy_costs(void);
+
+#else
+
+#define init_sched_energy_costs() do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#endif
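A small sketch of how topology code might walk sge_array with the iterator defined above; the dump function and the pr_info output are illustrative only, not part of the patch:

	static void example_dump_energy_model(void)
	{
		int cpu, level;

		for_each_possible_cpu(cpu) {
			for_each_possible_sd_level(level) {
				struct sched_group_energy *sge = sge_array[cpu][level];

				if (!sge)
					continue;
				pr_info("cpu%d level%d: %u cap states, %u idle states\n",
					cpu, level, sge->nr_cap_states,
					sge->nr_idle_states);
			}
		}
	}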
index 297d4fa1cfe513d85340ae43c1217d47e9c7e881..0afc11f8f30030af685217aa0656da16932dd09e 100644 (file)
@@ -66,6 +66,7 @@ struct uart_ops {
        void            (*set_ldisc)(struct uart_port *, struct ktermios *);
        void            (*pm)(struct uart_port *, unsigned int state,
                              unsigned int oldstate);
+       void            (*wake_peer)(struct uart_port *);
 
        /*
         * Return a string describing the type of the port
index 8ff34ed1ae8a1aecabccea370cac989fb24382e1..a0596ca0e80ac77aeb0afa29648532ef51a5deae 100644 (file)
@@ -15,6 +15,7 @@ struct sock_diag_handler {
        __u8 family;
        int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
        int (*get_info)(struct sk_buff *skb, struct sock *sk);
+       int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
 };
 
 int sock_diag_register(const struct sock_diag_handler *h);
@@ -74,4 +75,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk)
 }
 void sock_diag_broadcast_destroy(struct sock *sk);
 
+int sock_diag_destroy(struct sock *sk, int err);
 #endif
index 8b6ec7ef0854e0f51fd94e38dbf7b831c9015f6c..c59803dc68de7e7a1d4805b83dabad5a5b00aaf8 100644 (file)
@@ -433,6 +433,7 @@ extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 
 static inline void lock_system_sleep(void)
 {
index eded095fe81e5c3ddc04e2af91f5bdaf9e4d28a5..4cf89517783ab8e4a56a71ea6e88646427c74aa9 100644 (file)
@@ -158,8 +158,8 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
                                        bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-                                    bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+                                             bool to_user)
 {
        if (!__builtin_constant_p(n))
                __check_object_size(ptr, n, to_user);
index ec89d846324cac714807f7c002339636c497e4d8..b7246d2ed7c9772cf4bc8757ae9e64664d47516c 100644 (file)
@@ -233,6 +233,7 @@ static inline u64 ktime_get_raw_ns(void)
 
 extern u64 ktime_get_mono_fast_ns(void);
 extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
 
 /*
  * Timespec interfaces utilizing the ktime based ones
diff --git a/include/linux/usb/class-dual-role.h b/include/linux/usb/class-dual-role.h
new file mode 100644 (file)
index 0000000..c6df223
--- /dev/null
@@ -0,0 +1,129 @@
+#ifndef __LINUX_CLASS_DUAL_ROLE_H__
+#define __LINUX_CLASS_DUAL_ROLE_H__
+
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+enum dual_role_supported_modes {
+       DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP = 0,
+       DUAL_ROLE_SUPPORTED_MODES_DFP,
+       DUAL_ROLE_SUPPORTED_MODES_UFP,
+/*The following should be the last element*/
+       DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL,
+};
+
+enum {
+       DUAL_ROLE_PROP_MODE_UFP = 0,
+       DUAL_ROLE_PROP_MODE_DFP,
+       DUAL_ROLE_PROP_MODE_NONE,
+/*The following should be the last element*/
+       DUAL_ROLE_PROP_MODE_TOTAL,
+};
+
+enum {
+       DUAL_ROLE_PROP_PR_SRC = 0,
+       DUAL_ROLE_PROP_PR_SNK,
+       DUAL_ROLE_PROP_PR_NONE,
+/*The following should be the last element*/
+       DUAL_ROLE_PROP_PR_TOTAL,
+
+};
+
+enum {
+       DUAL_ROLE_PROP_DR_HOST = 0,
+       DUAL_ROLE_PROP_DR_DEVICE,
+       DUAL_ROLE_PROP_DR_NONE,
+/*The following should be the last element*/
+       DUAL_ROLE_PROP_DR_TOTAL,
+};
+
+enum {
+       DUAL_ROLE_PROP_VCONN_SUPPLY_NO = 0,
+       DUAL_ROLE_PROP_VCONN_SUPPLY_YES,
+/*The following should be the last element*/
+       DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL,
+};
+
+enum dual_role_property {
+       DUAL_ROLE_PROP_SUPPORTED_MODES = 0,
+       DUAL_ROLE_PROP_MODE,
+       DUAL_ROLE_PROP_PR,
+       DUAL_ROLE_PROP_DR,
+       DUAL_ROLE_PROP_VCONN_SUPPLY,
+};
+
+struct dual_role_phy_instance;
+
+/* Description of typec port */
+struct dual_role_phy_desc {
+       /* /sys/class/dual_role_usb/<name>/ */
+       const char *name;
+       enum dual_role_supported_modes supported_modes;
+       enum dual_role_property *properties;
+       size_t num_properties;
+
+       /* Callback for "cat /sys/class/dual_role_usb/<name>/<property>" */
+       int (*get_property)(struct dual_role_phy_instance *dual_role,
+                            enum dual_role_property prop,
+                            unsigned int *val);
+       /* Callback for "echo <value> >
+        *                      /sys/class/dual_role_usb/<name>/<property>" */
+       int (*set_property)(struct dual_role_phy_instance *dual_role,
+                            enum dual_role_property prop,
+                            const unsigned int *val);
+       /* Decides whether userspace can change a specific property */
+       int (*property_is_writeable)(struct dual_role_phy_instance *dual_role,
+                                     enum dual_role_property prop);
+};
+
+struct dual_role_phy_instance {
+       const struct dual_role_phy_desc *desc;
+
+       /* Driver private data */
+       void *drv_data;
+
+       struct device dev;
+       struct work_struct changed_work;
+};
+
+#if IS_ENABLED(CONFIG_DUAL_ROLE_USB_INTF)
+extern void dual_role_instance_changed(struct dual_role_phy_instance
+                                      *dual_role);
+extern struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+                                const struct dual_role_phy_desc *desc);
+extern void devm_dual_role_instance_unregister(struct device *dev,
+                                              struct dual_role_phy_instance
+                                              *dual_role);
+extern int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+                                 enum dual_role_property prop,
+                                 unsigned int *val);
+extern int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+                                 enum dual_role_property prop,
+                                 const unsigned int *val);
+extern int dual_role_property_is_writeable(struct dual_role_phy_instance
+                                          *dual_role,
+                                          enum dual_role_property prop);
+extern void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role);
+#else /* CONFIG_DUAL_ROLE_USB_INTF */
+static inline void dual_role_instance_changed(struct dual_role_phy_instance
+                                      *dual_role){}
+static inline struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+                                const struct dual_role_phy_desc *desc)
+{
+       return ERR_PTR(-ENOSYS);
+}
+static inline void devm_dual_role_instance_unregister(struct device *dev,
+                                              struct dual_role_phy_instance
+                                              *dual_role){}
+static inline void *dual_role_get_drvdata(struct dual_role_phy_instance
+               *dual_role)
+{
+       return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_DUAL_ROLE_USB_INTF */
+#endif /* __LINUX_CLASS_DUAL_ROLE_H__ */
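A minimal sketch of a driver registering a dual-role instance with this class; the property table, callback, probe function, and port name are hypothetical, only the class API comes from the header above:

	static enum dual_role_property example_props[] = {
		DUAL_ROLE_PROP_MODE,
		DUAL_ROLE_PROP_PR,
		DUAL_ROLE_PROP_DR,
	};

	static int example_get_property(struct dual_role_phy_instance *dual_role,
					enum dual_role_property prop,
					unsigned int *val)
	{
		/* report UFP / sink / device by default in this sketch */
		switch (prop) {
		case DUAL_ROLE_PROP_MODE:
			*val = DUAL_ROLE_PROP_MODE_UFP;
			return 0;
		case DUAL_ROLE_PROP_PR:
			*val = DUAL_ROLE_PROP_PR_SNK;
			return 0;
		case DUAL_ROLE_PROP_DR:
			*val = DUAL_ROLE_PROP_DR_DEVICE;
			return 0;
		default:
			return -EINVAL;
		}
	}

	static int example_probe(struct platform_device *pdev)
	{
		static const struct dual_role_phy_desc desc = {
			.name		 = "otg_default",
			.supported_modes = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP,
			.properties	 = example_props,
			.num_properties	 = ARRAY_SIZE(example_props),
			.get_property	 = example_get_property,
		};
		struct dual_role_phy_instance *inst;

		inst = devm_dual_role_instance_register(&pdev->dev, &desc);
		return PTR_ERR_OR_ZERO(inst);
	}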
index 1074b8921a5dc59dda948ef39e7281f63d420efb..15d7c311e86e60df68b27170f012675e1bef678b 100644 (file)
@@ -574,6 +574,7 @@ struct usb_function_instance {
        struct config_group group;
        struct list_head cfs_list;
        struct usb_function_driver *fd;
+       struct usb_function *f;
        int (*set_inst_name)(struct usb_function_instance *inst,
                              const char *name);
        void (*free_func_inst)(struct usb_function_instance *inst);
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644 (file)
index 0000000..ebe3c4d
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644 (file)
index 0000000..4e84177
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+
+#endif /* __LINUX_USB_F_MTP_H */
index 3e5d9075960f6c756ead3c60013f84f873206932..73fae8c4a5fb50d94b72f12bed28f98d170f5787 100644 (file)
@@ -189,6 +189,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
 extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
+void quiet_vmstat(void);
 void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
@@ -249,6 +250,7 @@ static inline void __dec_zone_page_state(struct page *page,
 
 static inline void refresh_zone_stat_thresholds(void) { }
 static inline void cpu_vm_stats_fold(int cpu) { }
+static inline void quiet_vmstat(void) { }
 
 static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644 (file)
index 0000000..f4a698a
--- /dev/null
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+       WAKE_LOCK_SUSPEND, /* Prevent suspend */
+       WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+       struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+                                 const char *name)
+{
+       wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+       wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+       __pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+       __pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+       __pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+       return lock->ws.active;
+}
+
+#endif
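A short usage sketch of the legacy wakelock API above, which is only a thin wrapper over wakeup sources; the lock name and the transfer functions are hypothetical:

	static struct wake_lock example_lock;

	static void example_wl_setup(void)
	{
		wake_lock_init(&example_lock, WAKE_LOCK_SUSPEND, "example");
	}

	static void example_start_transfer(void)
	{
		wake_lock(&example_lock);		/* block suspend while busy */
	}

	static void example_finish_transfer(void)
	{
		/* or wake_lock_timeout(&example_lock, HZ) to auto-release */
		wake_unlock(&example_lock);
	}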
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644 (file)
index 0000000..d84d8c3
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+void log_wakeup_reason(int irq);
+int check_wakeup_reason(int irq);
+
+#ifdef CONFIG_SUSPEND
+void log_suspend_abort_reason(const char *fmt, ...);
+#else
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
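For illustration, how a driver's suspend callback might record why it is aborting suspend; the device, the busy flag, and the message are hypothetical, only log_suspend_abort_reason() comes from the header above:

	static bool example_busy;	/* hypothetical in-flight transfer flag */

	static int example_dev_suspend(struct device *dev)
	{
		if (example_busy) {
			log_suspend_abort_reason("example: transfer in flight on %s",
						 dev_name(dev));
			return -EBUSY;
		}
		return 0;
	}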
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644 (file)
index 0000000..8e8b06f
--- /dev/null
@@ -0,0 +1,30 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+#define WLAN_PLAT_NODFS_FLAG   0x01
+
+struct wifi_platform_data {
+       int (*set_power)(int val);
+       int (*set_reset)(int val);
+       int (*set_carddetect)(int val);
+       void *(*mem_prealloc)(int section, unsigned long size);
+       int (*get_mac_addr)(unsigned char *buf);
+       int (*get_wake_irq)(void);
+       void *(*get_country_code)(char *ccode, u32 flags);
+};
+
+#endif
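A board file would typically hand this structure to the Wi-Fi driver as platform data; a minimal hypothetical instance (the callbacks and the MAC address are made up):

	static int example_wifi_set_power(int on)
	{
		/* toggle the WLAN regulator / reset GPIO here */
		return 0;
	}

	static int example_wifi_get_mac_addr(unsigned char *buf)
	{
		static const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

		memcpy(buf, mac, sizeof(mac));	/* locally administered example MAC */
		return 0;
	}

	static struct wifi_platform_data example_wifi_pdata = {
		.set_power	= example_wifi_set_power,
		.get_mac_addr	= example_wifi_get_mac_addr,
	};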
index 78003dfb8539bd42f75f8c2d04cbb7dce872e1bf..3275ddf9f00da8313eec5c4ebf77c25ffd624ed7 100644 (file)
@@ -227,6 +227,8 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
 void addrconf_prefix_rcv(struct net_device *dev,
                         u8 *opt, int len, bool sllao);
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *     anycast prototypes (anycast.c)
  */
index 59160de702b68023c248181ab6c5fcb6e2f42452..bdd985f41022416ea04594b2a4ae7b6676d1d111 100644 (file)
@@ -29,6 +29,8 @@ struct fib_rule {
        int                     suppress_prefixlen;
        char                    iifname[IFNAMSIZ];
        char                    oifname[IFNAMSIZ];
+       kuid_t                  uid_start;
+       kuid_t                  uid_end;
        struct rcu_head         rcu;
 };
 
@@ -87,6 +89,8 @@ struct fib_rules_ops {
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
+       [FRA_UID_START] = { .type = NLA_U32 }, \
+       [FRA_UID_END]   = { .type = NLA_U32 }, \
        [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
        [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
        [FRA_GOTO]      = { .type = NLA_U32 }
index 83969eebebf3b458bc4160fe0aded6f5d8fdd30c..833080732dece63959afa15a98949b5516b31bf7 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/in6.h>
 #include <linux/atomic.h>
 #include <net/flow_dissector.h>
+#include <linux/uidgid.h>
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -38,6 +39,7 @@ struct flowi_common {
 #define FLOWI_FLAG_SKIP_NH_OIF         0x08
        __u32   flowic_secid;
        struct flowi_tunnel flowic_tun_key;
+       kuid_t  flowic_uid;
 };
 
 union flowi_uli {
@@ -75,6 +77,7 @@ struct flowi4 {
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
 #define flowi4_tun_key         __fl_common.flowic_tun_key
+#define flowi4_uid             __fl_common.flowic_uid
 
        /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
@@ -94,7 +97,8 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
                                      __u8 proto, __u8 flags,
                                      __be32 daddr, __be32 saddr,
-                                     __be16 dport, __be16 sport)
+                                     __be16 dport, __be16 sport,
+                                     kuid_t uid)
 {
        fl4->flowi4_oif = oif;
        fl4->flowi4_iif = LOOPBACK_IFINDEX;
@@ -105,6 +109,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
        fl4->flowi4_flags = flags;
        fl4->flowi4_secid = 0;
        fl4->flowi4_tun_key.tun_id = 0;
+       fl4->flowi4_uid = uid;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
@@ -133,6 +138,7 @@ struct flowi6 {
 #define flowi6_flags           __fl_common.flowic_flags
 #define flowi6_secid           __fl_common.flowic_secid
 #define flowi6_tun_key         __fl_common.flowic_tun_key
+#define flowi6_uid             __fl_common.flowic_uid
        struct in6_addr         daddr;
        struct in6_addr         saddr;
        __be32                  flowlabel;
@@ -177,6 +183,7 @@ struct flowi {
 #define flowi_flags    u.__fl_common.flowic_flags
 #define flowi_secid    u.__fl_common.flowic_secid
 #define flowi_tun_key  u.__fl_common.flowic_tun_key
+#define flowi_uid      u.__fl_common.flowic_uid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
index b450d8653b30c0b838a2a591ad2e5d98b0ea42d3..f78c3a52529bc7885db8a01d5a3d604a99b87128 100644 (file)
@@ -170,6 +170,7 @@ struct ip_reply_arg {
                                /* -1 if not needed */ 
        int         bound_dev_if;
        u8          tos;
+       kuid_t      uid;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
index 295d291269e2c88ed4930041597a28f7c9a7a2f7..ba82feec2590f401b5a332ebf47b72c1f3319b7e 100644 (file)
@@ -116,7 +116,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr);
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
-                    u32 mark);
+                    u32 mark, kuid_t uid);
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
index a3b9ef74a3895dbce95b0eba9b4bb298f7be9d71..d016a8cb45cfb14684bbec4b04702592d65c72e9 100644 (file)
@@ -154,7 +154,8 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
        flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
                           RT_SCOPE_UNIVERSE, proto,
                           sk ? inet_sk_flowi_flags(sk) : 0,
-                          daddr, saddr, dport, sport);
+                          daddr, saddr, dport, sport,
+                          sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
        if (sk)
                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
        return ip_route_output_flow(net, fl4, sk);
@@ -267,7 +268,8 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
                flow_flags |= FLOWI_FLAG_ANYSRC;
 
        flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
-                          protocol, flow_flags, dst, src, dport, sport);
+                          protocol, flow_flags, dst, src, dport, sport,
+                          sock_i_uid(sk));
 }
 
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
index 3d5ff7436f41040eb7cbd9ef09f9cc57219b24c3..8f77df63a8f46670d8ff9be2be3b62d4e76032f8 100644 (file)
@@ -1067,6 +1067,7 @@ struct proto {
        void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
        struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
+       int                     (*diag_destroy)(struct sock *sk, int err);
 };
 
 int proto_register(struct proto *prot, int alloc_slab);
index e9d7a8ef9a6d6ee70da47892920d8db38dc9eaf5..4ea373913c9cd78a604e76688ad0ac047fc3e0d4 100644 (file)
@@ -284,6 +284,7 @@ extern int sysctl_tcp_autocorking;
 extern int sysctl_tcp_invalid_ratelimit;
 extern int sysctl_tcp_pacing_ss_ratio;
 extern int sysctl_tcp_pacing_ca_ratio;
+extern int sysctl_tcp_default_init_rwnd;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -1171,6 +1172,8 @@ void tcp_set_state(struct sock *sk, int state);
 
 void tcp_done(struct sock *sk);
 
+int tcp_abort(struct sock *sk, int err);
+
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
        rx_opt->dsack = 0;
index 6d4ed18e14278a6091b7e1c3fa40f1b2d4796d06..e57f50258cdae779b920815216f240fa284be713 100644 (file)
@@ -238,6 +238,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
                                  const struct sock *));
 void udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udp_push_pending_frames(struct sock *sk);
 void udp_flush_pending_frames(struct sock *sk);
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
new file mode 100644 (file)
index 0000000..531da43
--- /dev/null
@@ -0,0 +1,31 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+                pid_t pid, char *command),
+       TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+       TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+                pid_t pid, char *command),
+       TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+       TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
new file mode 100644 (file)
index 0000000..618988b
--- /dev/null
@@ -0,0 +1,79 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+                pid_t pid, char *command),
+       TP_ARGS(inode, offset, bytes, pid, command),
+       TP_STRUCT__entry(
+               __array(char, path, MAX_FILTER_STR_VAL);
+               __field(char *, pathname);
+               __field(loff_t, offset);
+               __field(int,    bytes);
+               __field(loff_t, i_size);
+               __string(cmdline, command);
+               __field(pid_t,  pid);
+               __field(ino_t,  ino);
+       ),
+       TP_fast_assign(
+               {
+                       struct dentry *d;
+
+                       /*
+                        * Grab a reference to the inode here because
+                        * d_obtain_alias() will either drop the inode
+                        * reference if it locates an existing dentry
+                        * or transfer the reference to the new dentry
+                        * created. In our case, the file is still open,
+                        * so the dentry is guaranteed to exist (connected),
+                        * so d_obtain_alias() drops the reference we
+                        * grabbed here.
+                        */
+                       ihold(inode);
+                       d = d_obtain_alias(inode);
+                       if (!IS_ERR(d)) {
+                               __entry->pathname = dentry_path(d,
+                                                       __entry->path,
+                                                       MAX_FILTER_STR_VAL);
+                               dput(d);
+                       } else
+                               __entry->pathname = ERR_PTR(-EINVAL);
+                       __entry->offset         = offset;
+                       __entry->bytes          = bytes;
+                       __entry->i_size         = i_size_read(inode);
+                       __assign_str(cmdline, command);
+                       __entry->pid            = pid;
+                       __entry->ino            = inode->i_ino;
+               }
+       ),
+       TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+                 " pid %d, i_size %llu, ino %lu",
+                 (IS_ERR(__entry->pathname) ? "ERROR" : __entry->pathname),
+                 __entry->offset, __entry->bytes, __get_str(cmdline),
+                 __entry->pid, __entry->i_size,
+                 (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+       TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+       TP_ARGS(inode, offset, bytes),
+       TP_STRUCT__entry(
+               __field(ino_t,  ino);
+               __field(loff_t, offset);
+               __field(int,    bytes);
+       ),
+       TP_fast_assign(
+               {
+                       __entry->ino            = inode->i_ino;
+                       __entry->offset         = offset;
+                       __entry->bytes          = bytes;
+               }
+       ),
+       TP_printk("ino %lu, offset %llu, bytes %d",
+                 (unsigned long) __entry->ino,
+                 __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644 (file)
index 0000000..951e6ca
--- /dev/null
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+                unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq),
+
+       TP_STRUCT__entry(
+           __field(          u32, cpu_id    )
+           __field(unsigned long, targfreq   )
+           __field(unsigned long, actualfreq )
+          ),
+
+       TP_fast_assign(
+           __entry->cpu_id = (u32) cpu_id;
+           __entry->targfreq = targfreq;
+           __entry->actualfreq = actualfreq;
+       ),
+
+       TP_printk("cpu=%u targ=%lu actual=%lu",
+             __entry->cpu_id, __entry->targfreq,
+             __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+            unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+                   TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+           TP_STRUCT__entry(
+                   __field(unsigned long, cpu_id    )
+                   __field(unsigned long, load      )
+                   __field(unsigned long, curtarg   )
+                   __field(unsigned long, curactual )
+                   __field(unsigned long, newtarg   )
+           ),
+
+           TP_fast_assign(
+                   __entry->cpu_id = cpu_id;
+                   __entry->load = load;
+                   __entry->curtarg = curtarg;
+                   __entry->curactual = curactual;
+                   __entry->newtarg = newtarg;
+           ),
+
+           TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+                     __entry->cpu_id, __entry->load, __entry->curtarg,
+                     __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/cpufreq_sched.h b/include/trace/events/cpufreq_sched.h
new file mode 100644 (file)
index 0000000..a46cd08
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *  Copyright (C)  2015 Steve Muckle <smuckle@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_sched
+
+#if !defined(_TRACE_CPUFREQ_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpufreq_sched_throttled,
+           TP_PROTO(unsigned int rem),
+           TP_ARGS(rem),
+           TP_STRUCT__entry(
+                   __field(    unsigned int,   rem)
+           ),
+           TP_fast_assign(
+                   __entry->rem = rem;
+           ),
+           TP_printk("throttled - %d usec remaining", __entry->rem)
+);
+
+TRACE_EVENT(cpufreq_sched_request_opp,
+           TP_PROTO(int cpu,
+                    unsigned long capacity,
+                    unsigned int freq_new,
+                    unsigned int requested_freq),
+           TP_ARGS(cpu, capacity, freq_new, requested_freq),
+           TP_STRUCT__entry(
+                   __field(    int,            cpu)
+                   __field(    unsigned long,  capacity)
+                   __field(    unsigned int,   freq_new)
+                   __field(    unsigned int,   requested_freq)
+                   ),
+           TP_fast_assign(
+                   __entry->cpu = cpu;
+                   __entry->capacity = capacity;
+                   __entry->freq_new = freq_new;
+                   __entry->requested_freq = requested_freq;
+                   ),
+           TP_printk("cpu %d cap change, cluster cap request %ld => OPP %d "
+                     "(cur %d)",
+                     __entry->cpu, __entry->capacity, __entry->freq_new,
+                     __entry->requested_freq)
+);
+
+TRACE_EVENT(cpufreq_sched_update_capacity,
+           TP_PROTO(int cpu,
+                    bool request,
+                    struct sched_capacity_reqs *scr,
+                    unsigned long new_capacity),
+           TP_ARGS(cpu, request, scr, new_capacity),
+           TP_STRUCT__entry(
+                   __field(    int,            cpu)
+                   __field(    bool,           request)
+                   __field(    unsigned long,  cfs)
+                   __field(    unsigned long,  rt)
+                   __field(    unsigned long,  dl)
+                   __field(    unsigned long,  total)
+                   __field(    unsigned long,  new_total)
+           ),
+           TP_fast_assign(
+                   __entry->cpu = cpu;
+                   __entry->request = request;
+                   __entry->cfs = scr->cfs;
+                   __entry->rt = scr->rt;
+                   __entry->dl = scr->dl;
+                   __entry->total = scr->total;
+                   __entry->new_total = new_capacity;
+           ),
+           TP_printk("cpu=%d set_cap=%d cfs=%ld rt=%ld dl=%ld old_tot=%ld "
+                     "new_tot=%ld",
+                     __entry->cpu, __entry->request, __entry->cfs, __entry->rt,
+                     __entry->dl, __entry->total, __entry->new_total)
+);
+
+#endif /* _TRACE_CPUFREQ_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/gpu.h b/include/trace/events/gpu.h
new file mode 100644 (file)
index 0000000..7e15cdf
--- /dev/null
@@ -0,0 +1,143 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu
+
+#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+       ({ \
+               u64 t = ns + (NSEC_PER_USEC / 2); \
+               do_div(t, NSEC_PER_SEC); \
+               t; \
+       })
+
+#define show_usecs_from_ns(ns) \
+       ({ \
+               u64 t = ns + (NSEC_PER_USEC / 2) ; \
+               u32 rem; \
+               do_div(t, NSEC_PER_USEC); \
+               rem = do_div(t, USEC_PER_SEC); \
+       })
+
+/*
+ * The gpu_sched_switch event indicates that a switch from one GPU context to
+ * another occurred on one of the GPU hardware blocks.
+ *
+ * The gpu_name argument identifies the GPU hardware block.  Each independently
+ * scheduled GPU hardware block should have a different name.  This may be used
+ * in different ways for different GPUs.  For example, if a GPU includes
+ * multiple processing cores it may use names "GPU 0", "GPU 1", etc.  If a GPU
+ * includes a separately scheduled 2D and 3D hardware block, it might use the
+ * names "2D" and "3D".
+ *
+ * The timestamp argument is the timestamp at which the switch occurred on the
+ * GPU. These timestamps are in units of nanoseconds and must use
+ * approximately the same time as sched_clock, though they need not come from
+ * any CPU clock. The timestamps for a single hardware block must be
+ * monotonically nondecreasing.  This means that if a variable compensation
+ * offset is used to translate from some other clock to the sched_clock, then
+ * care must be taken when increasing that offset, and doing so may result in
+ * multiple events with the same timestamp.
+ *
+ * The next_ctx_id argument identifies the next context that was running on
+ * the GPU hardware block.  A value of 0 indicates that the hardware block
+ * will be idle.
+ *
+ * The next_prio argument indicates the priority of the next context at the
+ * time of the event.  The exact numeric values may mean different things for
+ * different GPUs, but they should follow the rule that lower values indicate a
+ * higher priority.
+ *
+ * The next_job_id argument identifies the batch of work that the GPU will be
+ * working on.  This should correspond to a job_id that was previously traced
+ * as a gpu_job_enqueue event when the batch of work was created.
+ */
+TRACE_EVENT(gpu_sched_switch,
+
+       TP_PROTO(const char *gpu_name, u64 timestamp,
+               u32 next_ctx_id, s32 next_prio, u32 next_job_id),
+
+       TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
+
+       TP_STRUCT__entry(
+               __string(       gpu_name,       gpu_name        )
+               __field(        u64,            timestamp       )
+               __field(        u32,            next_ctx_id     )
+               __field(        s32,            next_prio       )
+               __field(        u32,            next_job_id     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(gpu_name, gpu_name);
+               __entry->timestamp = timestamp;
+               __entry->next_ctx_id = next_ctx_id;
+               __entry->next_prio = next_prio;
+               __entry->next_job_id = next_job_id;
+       ),
+
+       TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
+               "next_job_id=%lu",
+               __get_str(gpu_name),
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->next_ctx_id,
+               (long)__entry->next_prio,
+               (unsigned long)__entry->next_job_id)
+);
+
+/*
+ * The gpu_job_enqueue event indicates that a batch of work has been queued up
+ * to be processed by the GPU.  This event is not intended to indicate that
+ * the batch of work has been submitted to the GPU hardware, but rather that
+ * it has been submitted to the GPU kernel driver.
+ *
+ * This event should be traced on the thread that initiated the work being
+ * queued.  For example, if a batch of work is submitted to the kernel by a
+ * userland thread, the event should be traced on that thread.
+ *
+ * The ctx_id field identifies the GPU context in which the batch of work
+ * being queued is to be run.
+ *
+ * The job_id field identifies the batch of work being queued within the given
+ * GPU context.  The first batch of work submitted for a given GPU context
+ * should have a job_id of 0, and each subsequent batch of work should
+ * increment the job_id by 1.
+ *
+ * The type field identifies the type of the job being enqueued.  The job
+ * types may be different for different GPU hardware.  For example, a GPU may
+ * differentiate between "2D", "3D", and "compute" jobs.
+ */
+TRACE_EVENT(gpu_job_enqueue,
+
+       TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
+
+       TP_ARGS(ctx_id, job_id, type),
+
+       TP_STRUCT__entry(
+               __field(        u32,            ctx_id          )
+               __field(        u32,            job_id          )
+               __string(       type,           type            )
+       ),
+
+       TP_fast_assign(
+               __entry->ctx_id = ctx_id;
+               __entry->job_id = job_id;
+               __assign_str(type, type);
+       ),
+
+       TP_printk("ctx_id=%lu job_id=%lu type=%s",
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->job_id,
+               __get_str(type))
+);
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
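A sketch of how a GPU driver might emit these events while following the rules spelled out in the comments above (job_id starts at 0 per context, 0 as next_ctx_id means idle); the driver functions and the "GPU 0" block name are hypothetical:

	/* in exactly one .c file of the driver */
	#define CREATE_TRACE_POINTS
	#include <trace/events/gpu.h>

	static void example_submit_job(u32 ctx_id, u32 job_id)
	{
		/* traced on the thread that queued the work */
		trace_gpu_job_enqueue(ctx_id, job_id, "3D");
	}

	static void example_hw_context_switch(u64 gpu_timestamp_ns,
					      u32 next_ctx_id, s32 next_prio,
					      u32 next_job_id)
	{
		/* next_ctx_id == 0 indicates the hardware block goes idle */
		trace_gpu_sched_switch("GPU 0", gpu_timestamp_ns,
				       next_ctx_id, next_prio, next_job_id);
	}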
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644 (file)
index 0000000..82b368d
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+
+/*
+ * Unconditional logging of mmc block erase operations,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_erase_class,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size),
+       TP_STRUCT__entry(
+               __field(unsigned int, cmd)
+               __field(unsigned int, addr)
+               __field(unsigned int, size)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+               __entry->addr = addr;
+               __entry->size = size;
+       ),
+       TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+                 __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size));
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size));
+
+/*
+ * Logging of start of read or write mmc block operation,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_rw_class,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_STRUCT__entry(
+               __field(unsigned int, cmd)
+               __field(unsigned int, addr)
+               __field(unsigned int, size)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+               __entry->addr = addr;
+               __entry->size = data->blocks;
+       ),
+       TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+                 __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+                     (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+                     data));
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+                     (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+                     data));
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
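For orientation, the MMC block layer would bracket a multi-block transfer with the start/end events; the DEFINE_EVENT_CONDITION wrappers above make them no-ops unless the command is MMC_READ_MULTIPLE_BLOCK or MMC_WRITE_MULTIPLE_BLOCK and a data segment is attached. A hedged sketch of the call sites (the surrounding issue function is illustrative, not the actual mmc_blk code):

	#define CREATE_TRACE_POINTS
	#include <trace/events/mmc.h>

	static void example_issue_rw(struct mmc_request *mrq)
	{
		/* fires only for multi-block read/write with data attached */
		trace_mmc_blk_rw_start(mrq->cmd->opcode, mrq->cmd->arg, mrq->data);

		/* ... hand the request to the host controller and wait ... */

		trace_mmc_blk_rw_end(mrq->cmd->opcode, mrq->cmd->arg, mrq->data);
	}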
index 284244ebfe8d2c3ef7036c3a7e9b55ea15b11078..8924cc2b4ca8c45d867d00bbb522fa3eb37e74cd 100644 (file)
@@ -120,6 +120,38 @@ DEFINE_EVENT(cpu, cpu_frequency,
        TP_ARGS(frequency, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_limits,
+
+       TP_PROTO(unsigned int max_freq, unsigned int min_freq,
+               unsigned int cpu_id),
+
+       TP_ARGS(max_freq, min_freq, cpu_id),
+
+       TP_STRUCT__entry(
+               __field(        u32,            min_freq        )
+               __field(        u32,            max_freq        )
+               __field(        u32,            cpu_id          )
+       ),
+
+       TP_fast_assign(
+               __entry->min_freq = min_freq;
+               __entry->max_freq = max_freq;
+               __entry->cpu_id = cpu_id;
+       ),
+
+       TP_printk("min=%lu max=%lu cpu_id=%lu",
+                 (unsigned long)__entry->min_freq,
+                 (unsigned long)__entry->max_freq,
+                 (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(cpu, cpu_capacity,
+
+       TP_PROTO(unsigned int capacity, unsigned int cpu_id),
+
+       TP_ARGS(capacity, cpu_id)
+);
+
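cpu_frequency_limits is meant to fire whenever a policy's min/max clamp changes, so a trace consumer can distinguish a governor decision from an externally imposed cap when reading the cpu_frequency samples. A hedged sketch of an emission point (the hook name is illustrative, not the actual cpufreq call site):

	#include <linux/cpufreq.h>
	#include <trace/events/power.h>

	static void example_policy_limits_changed(struct cpufreq_policy *policy)
	{
		trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
	}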
 TRACE_EVENT(device_pm_callback_start,
 
        TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -273,6 +305,25 @@ DEFINE_EVENT(clock, clock_set_rate,
        TP_ARGS(name, state, cpu_id)
 );
 
+TRACE_EVENT(clock_set_parent,
+
+       TP_PROTO(const char *name, const char *parent_name),
+
+       TP_ARGS(name, parent_name),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __string(       parent_name,    parent_name     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __assign_str(parent_name, parent_name);
+       ),
+
+       TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
 /*
  * The power domain events are used for power domains transitions
  */
index 9b90c57517a918687189933ae6920b80d251e98e..dffaffab4bc88696b8c8d0cbb98f518341648fd7 100644 (file)
@@ -191,6 +191,31 @@ TRACE_EVENT(sched_migrate_task,
                  __entry->orig_cpu, __entry->dest_cpu)
 );
 
+/*
+ * Tracepoint for a CPU going offline/online:
+ */
+TRACE_EVENT(sched_cpu_hotplug,
+
+       TP_PROTO(int affected_cpu, int error, int status),
+
+       TP_ARGS(affected_cpu, error, status),
+
+       TP_STRUCT__entry(
+               __field(        int,    affected_cpu            )
+               __field(        int,    error                   )
+               __field(        int,    status                  )
+       ),
+
+       TP_fast_assign(
+               __entry->affected_cpu   = affected_cpu;
+               __entry->error          = error;
+               __entry->status         = status;
+       ),
+
+       TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
+               __entry->status ? "online" : "offline", __entry->error)
+);
+
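The hotplug event is intended to be emitted once an online or offline attempt has completed, recording both the direction and the error code of the operation. A sketch of the two call sites (their exact placement in the hotplug code is an assumption):

	/* after trying to bring a CPU up: status=1 prints "online" */
	trace_sched_cpu_hotplug(cpu, err, 1);

	/* after trying to take a CPU down: status=0 prints "offline" */
	trace_sched_cpu_hotplug(cpu, err, 0);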
 DECLARE_EVENT_CLASS(sched_process_template,
 
        TP_PROTO(struct task_struct *p),
@@ -219,7 +244,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
 DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));
-            
+
 
 /*
  * Tracepoint for a task exiting:
@@ -373,6 +398,30 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));
 
+/*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+       TP_PROTO(struct task_struct *tsk),
+
+       TP_ARGS(tsk),
+
+       TP_STRUCT__entry(
+               __field( pid_t, pid     )
+               __field( void*, caller  )
+               __field( bool, io_wait  )
+       ),
+
+       TP_fast_assign(
+               __entry->pid    = tsk->pid;
+               __entry->caller = (void*)get_wchan(tsk);
+               __entry->io_wait = tsk->in_iowait;
+       ),
+
+       TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
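Because the caller field is filled from get_wchan(), the event is only informative if it is emitted while the kernel can still recover where the task blocked, i.e. on the wakeup path of a task that slept in TASK_UNINTERRUPTIBLE. A hedged sketch of such a call site, where prev_state stands for the task's state as sampled before the wakeup:

	/* on the wakeup path, while p's blocking callsite is still recoverable */
	if (prev_state == TASK_UNINTERRUPTIBLE)
		trace_sched_blocked_reason(p);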
 /*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
@@ -562,6 +611,504 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 
        TP_printk("cpu=%d", __entry->cpu)
 );
+
+TRACE_EVENT(sched_contrib_scale_f,
+
+       TP_PROTO(int cpu, unsigned long freq_scale_factor,
+                unsigned long cpu_scale_factor),
+
+       TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),
+
+       TP_STRUCT__entry(
+               __field(int, cpu)
+               __field(unsigned long, freq_scale_factor)
+               __field(unsigned long, cpu_scale_factor)
+       ),
+
+       TP_fast_assign(
+               __entry->cpu = cpu;
+               __entry->freq_scale_factor = freq_scale_factor;
+               __entry->cpu_scale_factor = cpu_scale_factor;
+       ),
+
+       TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
+                 __entry->cpu, __entry->freq_scale_factor,
+                 __entry->cpu_scale_factor)
+);
+
+#ifdef CONFIG_SMP
+
+/*
+ * Tracepoint for accounting sched averages for tasks.
+ */
+TRACE_EVENT(sched_load_avg_task,
+
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+       TP_ARGS(tsk, avg),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t, pid                             )
+               __field( int,   cpu                             )
+               __field( unsigned long, load_avg                )
+               __field( unsigned long, util_avg                )
+               __field( u64,           load_sum                )
+               __field( u32,           util_sum                )
+               __field( u32,           period_contrib          )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid                    = tsk->pid;
+               __entry->cpu                    = task_cpu(tsk);
+               __entry->load_avg               = avg->load_avg;
+               __entry->util_avg               = avg->util_avg;
+               __entry->load_sum               = avg->load_sum;
+               __entry->util_sum               = avg->util_sum;
+               __entry->period_contrib         = avg->period_contrib;
+       ),
+
+       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+                 " util_sum=%u period_contrib=%u",
+                 __entry->comm,
+                 __entry->pid,
+                 __entry->cpu,
+                 __entry->load_avg,
+                 __entry->util_avg,
+                 (u64)__entry->load_sum,
+                 (u32)__entry->util_sum,
+                 (u32)__entry->period_contrib)
+);
+
+/*
+ * Tracepoint for accounting sched averages for cpus.
+ */
+TRACE_EVENT(sched_load_avg_cpu,
+
+       TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+       TP_ARGS(cpu, cfs_rq),
+
+       TP_STRUCT__entry(
+               __field( int,   cpu                             )
+               __field( unsigned long, load_avg                )
+               __field( unsigned long, util_avg                )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu                    = cpu;
+               __entry->load_avg               = cfs_rq->avg.load_avg;
+               __entry->util_avg               = cfs_rq->avg.util_avg;
+       ),
+
+       TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
+                 __entry->cpu, __entry->load_avg, __entry->util_avg)
+);
+
+/*
+ * Tracepoint for sched_tune_config settings
+ */
+TRACE_EVENT(sched_tune_config,
+
+       TP_PROTO(int boost),
+
+       TP_ARGS(boost),
+
+       TP_STRUCT__entry(
+               __field( int,   boost           )
+       ),
+
+       TP_fast_assign(
+               __entry->boost  = boost;
+       ),
+
+       TP_printk("boost=%d ", __entry->boost)
+);
+
+/*
+ * Tracepoint for accounting CPU boosted utilization
+ */
+TRACE_EVENT(sched_boost_cpu,
+
+       TP_PROTO(int cpu, unsigned long util, long margin),
+
+       TP_ARGS(cpu, util, margin),
+
+       TP_STRUCT__entry(
+               __field( int,           cpu                     )
+               __field( unsigned long, util                    )
+               __field(long,           margin                  )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu    = cpu;
+               __entry->util   = util;
+               __entry->margin = margin;
+       ),
+
+       TP_printk("cpu=%d util=%lu margin=%ld",
+                 __entry->cpu,
+                 __entry->util,
+                 __entry->margin)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_tasks_update,
+
+       TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
+               int boost, int max_boost),
+
+       TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN   )
+               __field( pid_t,         pid             )
+               __field( int,           cpu             )
+               __field( int,           tasks           )
+               __field( int,           idx             )
+               __field( int,           boost           )
+               __field( int,           max_boost       )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid            = tsk->pid;
+               __entry->cpu            = cpu;
+               __entry->tasks          = tasks;
+               __entry->idx            = idx;
+               __entry->boost          = boost;
+               __entry->max_boost      = max_boost;
+       ),
+
+       TP_printk("pid=%d comm=%s "
+                       "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
+               __entry->pid, __entry->comm,
+               __entry->cpu, __entry->tasks, __entry->idx,
+               __entry->boost, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for schedtune_boostgroup_update
+ */
+TRACE_EVENT(sched_tune_boostgroup_update,
+
+       TP_PROTO(int cpu, int variation, int max_boost),
+
+       TP_ARGS(cpu, variation, max_boost),
+
+       TP_STRUCT__entry(
+               __field( int,   cpu             )
+               __field( int,   variation       )
+               __field( int,   max_boost       )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu            = cpu;
+               __entry->variation      = variation;
+               __entry->max_boost      = max_boost;
+       ),
+
+       TP_printk("cpu=%d variation=%d max_boost=%d",
+               __entry->cpu, __entry->variation, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for accounting task boosted utilization
+ */
+TRACE_EVENT(sched_boost_task,
+
+       TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
+
+       TP_ARGS(tsk, util, margin),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN           )
+               __field( pid_t,         pid                     )
+               __field( unsigned long, util                    )
+               __field( long,          margin                  )
+
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid    = tsk->pid;
+               __entry->util   = util;
+               __entry->margin = margin;
+       ),
+
+       TP_printk("comm=%s pid=%d util=%lu margin=%ld",
+                 __entry->comm, __entry->pid,
+                 __entry->util,
+                 __entry->margin)
+);
+
+/*
+ * Tracepoint for accounting sched group energy
+ */
+TRACE_EVENT(sched_energy_diff,
+
+       TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
+               int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
+               int nrgn, int nrgp),
+
+       TP_ARGS(tsk, scpu, dcpu, udelta,
+               nrgb, nrga, nrgd, capb, capa, capd,
+               nrgn, nrgp),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN   )
+               __field( pid_t, pid     )
+               __field( int,   scpu    )
+               __field( int,   dcpu    )
+               __field( int,   udelta  )
+               __field( int,   nrgb    )
+               __field( int,   nrga    )
+               __field( int,   nrgd    )
+               __field( int,   capb    )
+               __field( int,   capa    )
+               __field( int,   capd    )
+               __field( int,   nrgn    )
+               __field( int,   nrgp    )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid            = tsk->pid;
+               __entry->scpu           = scpu;
+               __entry->dcpu           = dcpu;
+               __entry->udelta         = udelta;
+               __entry->nrgb           = nrgb;
+               __entry->nrga           = nrga;
+               __entry->nrgd           = nrgd;
+               __entry->capb           = capb;
+               __entry->capa           = capa;
+               __entry->capd           = capd;
+               __entry->nrgn           = nrgn;
+               __entry->nrgp           = nrgp;
+       ),
+
+       TP_printk("pid=%d comm=%s "
+                       "src_cpu=%d dst_cpu=%d usage_delta=%d "
+                       "nrg_before=%d nrg_after=%d nrg_diff=%d "
+                       "cap_before=%d cap_after=%d cap_delta=%d "
+                       "nrg_delta=%d nrg_payoff=%d",
+               __entry->pid, __entry->comm,
+               __entry->scpu, __entry->dcpu, __entry->udelta,
+               __entry->nrgb, __entry->nrga, __entry->nrgd,
+               __entry->capb, __entry->capa, __entry->capd,
+               __entry->nrgn, __entry->nrgp)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_filter,
+
+       TP_PROTO(int nrg_delta, int cap_delta,
+                int nrg_gain,  int cap_gain,
+                int payoff, int region),
+
+       TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),
+
+       TP_STRUCT__entry(
+               __field( int,   nrg_delta       )
+               __field( int,   cap_delta       )
+               __field( int,   nrg_gain        )
+               __field( int,   cap_gain        )
+               __field( int,   payoff          )
+               __field( int,   region          )
+       ),
+
+       TP_fast_assign(
+               __entry->nrg_delta      = nrg_delta;
+               __entry->cap_delta      = cap_delta;
+               __entry->nrg_gain       = nrg_gain;
+               __entry->cap_gain       = cap_gain;
+               __entry->payoff         = payoff;
+               __entry->region         = region;
+       ),
+
+       TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
+               __entry->nrg_delta, __entry->cap_delta,
+               __entry->nrg_gain, __entry->cap_gain,
+               __entry->payoff, __entry->region)
+);
+
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+       TP_PROTO(bool overutilized),
+
+       TP_ARGS(overutilized),
+
+       TP_STRUCT__entry(
+               __field( bool,  overutilized    )
+       ),
+
+       TP_fast_assign(
+               __entry->overutilized   = overutilized;
+       ),
+
+       TP_printk("overutilized=%d",
+               __entry->overutilized ? 1 : 0)
+);
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+       TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+                                               u64 wallclock, u64 irqtime),
+
+       TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(        pid_t,  cur_pid                 )
+               __field(unsigned int,   cur_freq                )
+               __field(        u64,    wallclock               )
+               __field(        u64,    mark_start              )
+               __field(        u64,    delta_m                 )
+               __field(        u64,    win_start               )
+               __field(        u64,    delta                   )
+               __field(        u64,    irqtime                 )
+               __field(        int,    evt                     )
+               __field(unsigned int,   demand                  )
+               __field(unsigned int,   sum                     )
+               __field(         int,   cpu                     )
+               __field(        u64,    cs                      )
+               __field(        u64,    ps                      )
+               __field(        u32,    curr_window             )
+               __field(        u32,    prev_window             )
+               __field(        u64,    nt_cs                   )
+               __field(        u64,    nt_ps                   )
+               __field(        u32,    active_windows          )
+       ),
+
+       TP_fast_assign(
+               __entry->wallclock      = wallclock;
+               __entry->win_start      = rq->window_start;
+               __entry->delta          = (wallclock - rq->window_start);
+               __entry->evt            = evt;
+               __entry->cpu            = rq->cpu;
+               __entry->cur_pid        = rq->curr->pid;
+               __entry->cur_freq       = rq->cur_freq;
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->mark_start     = p->ravg.mark_start;
+               __entry->delta_m        = (wallclock - p->ravg.mark_start);
+               __entry->demand         = p->ravg.demand;
+               __entry->sum            = p->ravg.sum;
+               __entry->irqtime        = irqtime;
+               __entry->cs             = rq->curr_runnable_sum;
+               __entry->ps             = rq->prev_runnable_sum;
+               __entry->curr_window    = p->ravg.curr_window;
+               __entry->prev_window    = p->ravg.prev_window;
+               __entry->nt_cs          = rq->nt_curr_runnable_sum;
+               __entry->nt_ps          = rq->nt_prev_runnable_sum;
+               __entry->active_windows = p->ravg.active_windows;
+       ),
+
+       TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
+               " cs %llu ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u"
+               , __entry->wallclock, __entry->win_start, __entry->delta,
+               __entry->evt, __entry->cpu,
+               __entry->cur_freq, __entry->cur_pid,
+               __entry->pid, __entry->comm, __entry->mark_start,
+               __entry->delta_m, __entry->demand,
+               __entry->sum, __entry->irqtime,
+               __entry->cs, __entry->ps,
+               __entry->curr_window, __entry->prev_window,
+                 __entry->nt_cs, __entry->nt_ps,
+                 __entry->active_windows
+               )
+);
+
+TRACE_EVENT(walt_update_history,
+
+       TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+                       int evt),
+
+       TP_ARGS(rq, p, runtime, samples, evt),
+
+       TP_STRUCT__entry(
+               __array(        char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  pid                     )
+               __field(unsigned int,   runtime                 )
+               __field(         int,   samples                 )
+               __field(         int,   evt                     )
+               __field(         u64,   demand                  )
+               __field(unsigned int,   walt_avg                )
+               __field(unsigned int,   pelt_avg                )
+               __array(         u32,   hist, RAVG_HIST_SIZE_MAX)
+               __field(         int,   cpu                     )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+               __entry->pid            = p->pid;
+               __entry->runtime        = runtime;
+               __entry->samples        = samples;
+               __entry->evt            = evt;
+               __entry->demand         = p->ravg.demand;
+               __entry->walt_avg       = (__entry->demand << 10);
+               do_div(__entry->walt_avg, walt_ravg_window);
+               __entry->pelt_avg       = p->se.avg.util_avg;
+               memcpy(__entry->hist, p->ravg.sum_history,
+                                       RAVG_HIST_SIZE_MAX * sizeof(u32));
+               __entry->cpu            = rq->cpu;
+       ),
+
+       TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
+               " walt %u pelt %u (hist: %u %u %u %u %u) cpu %d",
+               __entry->pid, __entry->comm,
+               __entry->runtime, __entry->samples, __entry->evt,
+               __entry->demand,
+               __entry->walt_avg,
+               __entry->pelt_avg,
+               __entry->hist[0], __entry->hist[1],
+               __entry->hist[2], __entry->hist[3],
+               __entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+       TP_PROTO(struct rq *rq, struct task_struct *p),
+
+       TP_ARGS(rq, p),
+
+       TP_STRUCT__entry(
+               __field(int,            cpu                     )
+               __field(int,            pid                     )
+               __field(        u64,    cs                      )
+               __field(        u64,    ps                      )
+               __field(        s64,    nt_cs                   )
+               __field(        s64,    nt_ps                   )
+       ),
+
+       TP_fast_assign(
+               __entry->cpu            = cpu_of(rq);
+               __entry->cs             = rq->curr_runnable_sum;
+               __entry->ps             = rq->prev_runnable_sum;
+               __entry->nt_cs          = (s64)rq->nt_curr_runnable_sum;
+               __entry->nt_ps          = (s64)rq->nt_prev_runnable_sum;
+               __entry->pid            = p->pid;
+       ),
+
+       TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
+                 __entry->cpu, __entry->cs, __entry->ps,
+                 __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
+
+#endif /* CONFIG_SMP */
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
index 41420e341e75de5f2e46cd4fac5eab9e15f68168..51f891fb1b18ad9b14713fe3a4f2a541c9ca5442 100644 (file)
@@ -33,6 +33,8 @@ enum {
        BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
        BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
        BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FDA         = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
+       BINDER_TYPE_PTR         = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
 enum {
@@ -48,6 +50,14 @@ typedef __u64 binder_size_t;
 typedef __u64 binder_uintptr_t;
 #endif
 
+/**
+ * struct binder_object_header - header shared by all binder metadata objects.
+ * @type:      type of the object
+ */
+struct binder_object_header {
+       __u32        type;
+};
+
 /*
  * This is the flattened representation of a Binder object for transfer
  * between processes.  The 'offsets' supplied as part of a binder transaction
@@ -56,9 +66,8 @@ typedef __u64 binder_uintptr_t;
  * between processes.
  */
 struct flat_binder_object {
-       /* 8 bytes for large_flat_header. */
-       __u32           type;
-       __u32           flags;
+       struct binder_object_header     hdr;
+       __u32                           flags;
 
        /* 8 bytes of data. */
        union {
@@ -70,6 +79,84 @@ struct flat_binder_object {
        binder_uintptr_t        cookie;
 };
 
+/**
+ * struct binder_fd_object - describes a file descriptor to be fixed up.
+ * @hdr:       common header structure
+ * @pad_flags: padding to remain compatible with old userspace code
+ * @pad_binder:        padding to remain compatible with old userspace code
+ * @fd:                file descriptor
+ * @cookie:    opaque data, used by user-space
+ */
+struct binder_fd_object {
+       struct binder_object_header     hdr;
+       __u32                           pad_flags;
+       union {
+               binder_uintptr_t        pad_binder;
+               __u32                   fd;
+       };
+
+       binder_uintptr_t                cookie;
+};
+
+/* struct binder_buffer_object - object describing a userspace buffer
+ * @hdr:               common header structure
+ * @flags:             one or more BINDER_BUFFER_* flags
+ * @buffer:            address of the buffer
+ * @length:            length of the buffer
+ * @parent:            index in offset array pointing to parent buffer
+ * @parent_offset:     offset in @parent pointing to this buffer
+ *
+ * A binder_buffer_object represents an object that the
+ * binder kernel driver can copy verbatim to the target
+ * address space. A buffer itself may be pointed to from
+ * within another buffer, meaning that the pointer inside
+ * that other buffer needs to be fixed up as well. This
+ * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
+ * flag in @flags, by setting @parent buffer to the index
+ * in the offset array pointing to the parent binder_buffer_object,
+ * and by setting @parent_offset to the offset in the parent buffer
+ * at which the pointer to this buffer is located.
+ */
+struct binder_buffer_object {
+       struct binder_object_header     hdr;
+       __u32                           flags;
+       binder_uintptr_t                buffer;
+       binder_size_t                   length;
+       binder_size_t                   parent;
+       binder_size_t                   parent_offset;
+};
+
+enum {
+       BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
+};
+
+/* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr:               common header structure
+ * @num_fds:           number of file descriptors in the buffer
+ * @parent:            index in offset array to buffer holding the fd array
+ * @parent_offset:     start offset of fd array in the buffer
+ *
+ * A binder_fd_array object represents an array of file
+ * descriptors embedded in a binder_buffer_object. It is
+ * different from a regular binder_buffer_object because it
+ * describes a list of file descriptors to fix up, not an opaque
+ * blob of memory, and hence the kernel needs to treat it differently.
+ *
+ * An example of how this would be used is with Android's
+ * native_handle_t object, which is a struct with a list of integers
+ * and a list of file descriptors. The native_handle_t struct itself
+ * will be represented by a struct binder_buffer_object, whereas the
+ * embedded list of file descriptors is represented by a
+ * struct binder_fd_array_object with that binder_buffer_object as
+ * a parent.
+ */
+struct binder_fd_array_object {
+       struct binder_object_header     hdr;
+       binder_size_t                   num_fds;
+       binder_size_t                   parent;
+       binder_size_t                   parent_offset;
+};
+
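Taken together, the scatter-gather objects let userspace ship a struct that itself carries file descriptors: the struct travels as a binder_buffer_object and its fd list as a binder_fd_array_object whose parent index points back at that buffer. A hedged userspace sketch of filling in the two objects; the payload struct, its position in the offsets array and the uapi include path are assumptions:

	#include <stddef.h>		/* offsetof */
	#include <stdint.h>		/* uintptr_t */
	#include <linux/binder.h>	/* assumes the updated uapi header is installed */

	/* hypothetical payload: a struct with an embedded array of two fds */
	struct example_payload {
		int count;
		int fds[2];
	};

	static void example_fill_sg_objects(struct example_payload *payload,
					    struct binder_buffer_object *bbo,
					    struct binder_fd_array_object *fda)
	{
		bbo->hdr.type = BINDER_TYPE_PTR;
		bbo->flags    = 0;
		bbo->buffer   = (binder_uintptr_t)(uintptr_t)payload;
		bbo->length   = sizeof(*payload);

		fda->hdr.type      = BINDER_TYPE_FDA;
		fda->num_fds       = 2;
		fda->parent        = 0;	/* assumes bbo sits at index 0 of the offsets array */
		fda->parent_offset = offsetof(struct example_payload, fds);
	}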
 /*
  * On 64-bit platforms where user code may run in 32-bits the driver must
  * translate the buffer (and local binder) addresses appropriately.
@@ -162,6 +249,11 @@ struct binder_transaction_data {
        } data;
 };
 
+struct binder_transaction_data_sg {
+       struct binder_transaction_data transaction_data;
+       binder_size_t buffers_size;
+};
+
 struct binder_ptr_cookie {
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
@@ -346,6 +438,12 @@ enum binder_driver_command_protocol {
        /*
         * void *: cookie
         */
+
+       BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
+       BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
+       /*
+        * binder_transaction_data_sg: the sent command.
+        */
 };
 
 #endif /* _UAPI_LINUX_BINDER_H */
index 96161b8202b5d026ed39904f15a899659fc39adb..ce19c5bf51f7ed279ae23cd64afebec322bb187e 100644 (file)
@@ -49,6 +49,8 @@ enum {
        FRA_TABLE,      /* Extended table id */
        FRA_FWMASK,     /* mask for netfilter mark */
        FRA_OIFNAME,
+       FRA_UID_START,  /* UID range */
+       FRA_UID_END,
        __FRA_MAX
 };
 
index f15d980249b502e4638e6125f04e595f0f52035e..cd0c1a1a9ccfac53c91b8c454e9e92e26508e119 100644 (file)
@@ -160,6 +160,8 @@ struct inodes_stat_t {
 #define FITHAW         _IOWR('X', 120, int)    /* Thaw */
 #define FITRIM         _IOWR('X', 121, struct fstrim_range)    /* Trim */
 
+#define FIDTRIM        _IOWR('f', 128, struct fstrim_range)    /* Deep discard trim */
+
 #define        FS_IOC_GETFLAGS                 _IOR('f', 1, long)
 #define        FS_IOC_SETFLAGS                 _IOW('f', 2, long)
 #define        FS_IOC_GETVERSION               _IOR('v', 1, long)
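FIDTRIM takes the same fstrim_range argument as FITRIM but additionally issues a deep discard of the trimmed blocks at the device level. A hedged userspace sketch of trimming an entire mounted filesystem:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	static int example_deep_trim(const char *mountpoint)
	{
		struct fstrim_range range = { .start = 0, .len = (__u64)-1, .minlen = 0 };
		int fd = open(mountpoint, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, FIDTRIM, &range);	/* trim and deep-discard the whole fs */
		close(fd);
		return ret;
	}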
index c9aca042e61d197927409531af4f8cdd88218adc..8485dd4300b807a0d97df109a77ddc3ba6a2defd 100644 (file)
@@ -358,6 +358,7 @@ enum fuse_opcode {
        FUSE_FALLOCATE     = 43,
        FUSE_READDIRPLUS   = 44,
        FUSE_RENAME2       = 45,
+       FUSE_CANONICAL_PATH = 2016,
 
        /* CUSE specific operations */
        CUSE_INIT          = 4096,
diff --git a/include/uapi/linux/if_pppolac.h b/include/uapi/linux/if_pppolac.h
new file mode 100644 (file)
index 0000000..b7eb815
--- /dev/null
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OLAC */
+       int             udp_socket;
+       struct __attribute__((packed)) {
+               __u16   tunnel, session;
+       } local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
diff --git a/include/uapi/linux/if_pppopns.h b/include/uapi/linux/if_pppopns.h
new file mode 100644 (file)
index 0000000..a392b52
--- /dev/null
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OPNS */
+       int             tcp_socket;
+       __u16           local;
+       __u16           remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
index e128769331b5a7c6be4a10cf44c262be68a65772..5861d45bc51dde9eb071b0b214cb86177064987c 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
@@ -56,7 +58,9 @@ struct pptp_addr {
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
        __kernel_sa_family_t sa_family;       /* address family, AF_PPPOX */
index 68a1f71fde9f7bcc90c21f586b57f1eab61de941..c7f189bd597997214f9c5f027562abac8ab154b1 100644 (file)
@@ -72,6 +72,8 @@ enum {
        INET_DIAG_BC_AUTO,
        INET_DIAG_BC_S_COND,
        INET_DIAG_BC_D_COND,
+       INET_DIAG_BC_DEV_COND,   /* u32 ifindex */
+       INET_DIAG_BC_MARK_COND,
 };
 
 struct inet_diag_hostcond {
@@ -81,6 +83,11 @@ struct inet_diag_hostcond {
        __be32  addr[0];
 };
 
+struct inet_diag_markcond {
+       __u32 mark;
+       __u32 mask;
+};
+
 /* Base info structure. It contains socket identity (addrs/ports/cookie)
  * and, alas, the information shown by netstat. */
 struct inet_diag_msg {
@@ -113,9 +120,13 @@ enum {
        INET_DIAG_DCTCPINFO,
        INET_DIAG_PROTOCOL,  /* response attribute only */
        INET_DIAG_SKV6ONLY,
+       INET_DIAG_LOCALS,
+       INET_DIAG_PEERS,
+       INET_DIAG_PAD,
+       INET_DIAG_MARK,
 };
 
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX INET_DIAG_MARK
 
 /* INET_DIAG_MEM */
 
index 38b4fef20219242fad287c0379e5ff58b7dedcea..2b1533859749077a09f5e50566a8ffa33156bc39 100644 (file)
@@ -164,6 +164,7 @@ enum {
        DEVCONF_ACCEPT_DAD,
        DEVCONF_FORCE_TLLAO,
        DEVCONF_NDISC_NOTIFY,
+       DEVCONF_ACCEPT_RA_RT_TABLE,
        DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_SUPPRESS_FRAG_NDISC,
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
new file mode 100644 (file)
index 0000000..ea7cf4d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _UAPI_LINUX_KEYCHORD_H_
+#define _UAPI_LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION               1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+       /* should be KEYCHORD_VERSION */
+       __u16 version;
+       /*
+        * client specified ID, returned from read()
+        * when this keychord is pressed.
+        */
+       __u16 id;
+
+       /* number of keycodes in this keychord */
+       __u16 count;
+
+       /* variable length array of keycodes */
+       __u16 keycodes[];
+};
+
+#endif /* _UAPI_LINUX_KEYCHORD_H_ */
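A userspace consumer programs the chords it cares about with a single write() and then blocks in read() until one of them is pressed, getting back the 16-bit id it registered. A hedged sketch; the chosen keycodes and the assumption that read() returns exactly one id are illustrative:

	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/input.h>
	#include <linux/keychord.h>

	static int example_wait_for_chord(void)
	{
		/* one keychord with two keycodes appended after the header */
		char buf[sizeof(struct input_keychord) + 2 * sizeof(__u16)];
		struct input_keychord *chord = (struct input_keychord *)buf;
		__u16 id = 0;
		int fd = open("/dev/keychord", O_RDWR);

		if (fd < 0)
			return -1;

		chord->version     = KEYCHORD_VERSION;
		chord->id          = 1;		/* returned by read() when pressed */
		chord->count       = 2;
		chord->keycodes[0] = KEY_VOLUMEDOWN;
		chord->keycodes[1] = KEY_POWER;

		if (write(fd, buf, sizeof(buf)) == sizeof(buf))
			read(fd, &id, sizeof(id));	/* blocks until the chord fires */

		close(fd);
		return id;
	}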
index b283d56c1db97955f0558776fdc98e03294b7ce0..d588107c9dcee80c2ca7a31cc9eef916933e01ef 100644 (file)
@@ -52,6 +52,8 @@
 #define REISER2FS_SUPER_MAGIC_STRING   "ReIsEr2Fs"
 #define REISER2FS_JR_SUPER_MAGIC_STRING        "ReIsEr3Fs"
 
+#define SDCARDFS_SUPER_MAGIC   0xb550ca10
+
 #define SMB_SUPER_MAGIC                0x517B
 #define CGROUP_SUPER_MAGIC     0x27e0eb
 #define CGROUP2_SUPER_MAGIC    0x63677270
index 208ae938733143ce0ba2117378423d443a3d8312..faaa28b3d0613e9ff68618f0fa89db80158517e9 100644 (file)
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
        __u32 timeout;
 
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
+       /* Use netlink messages for notification in addition to sysfs */
+       __u8 send_nl_msg;
+
        /* for kernel module internal use only */
        struct idletimer_tg *timer __attribute__((aligned(8)));
 };
index 87644f832494997ccc97af831a5fe63b7d183e5f..7f00df6cd8975273d0554bff021703a57542e52b 100644 (file)
@@ -26,4 +26,11 @@ struct xt_socket_mtinfo3 {
                           | XT_SOCKET_NOWILDCARD \
                           | XT_SOCKET_RESTORESKMARK)
 
+struct sock *xt_socket_lookup_slow_v4(struct net *net,
+                                     const struct sk_buff *skb,
+                                     const struct net_device *indev);
+struct sock *xt_socket_lookup_slow_v6(struct net *net,
+                                     const struct sk_buff *skb,
+                                     const struct net_device *indev);
+
 #endif /* _XT_SOCKET_H */
index a8d0759a9e400c5d472fe37d13a924bc9e9777a6..c1af9b3c27c4faae7efa546135d9788463476a46 100644 (file)
@@ -197,4 +197,13 @@ struct prctl_mm_map {
 # define PR_CAP_AMBIENT_LOWER          3
 # define PR_CAP_AMBIENT_CLEAR_ALL      4
 
+/* Sets the timerslack for arbitrary threads
+ * arg2 slack value, 0 means "use default"
+ * arg3 pid of the thread whose timer slack needs to be set
+ */
+#define PR_SET_TIMERSLACK_PID  127
+
+#define PR_SET_VMA             0x53564d41
+# define PR_SET_VMA_ANON_NAME          0
+
 #endif /* _LINUX_PRCTL_H */
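Both additions are driven with ordinary prctl() calls, assuming this patched uapi prctl.h is the one installed. A hedged userspace sketch; the slack value, the target thread id and the mapping name are illustrative:

	#include <sys/prctl.h>
	#include <sys/types.h>

	/* set another thread's timer slack to 50us; a value of 0 restores its default */
	static int example_set_slack(pid_t tid)
	{
		return prctl(PR_SET_TIMERSLACK_PID, 50000, tid, 0, 0);
	}

	/* label an anonymous mapping so it shows up by name in /proc/<pid>/maps */
	static int example_name_vma(void *addr, unsigned long len)
	{
		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, len, "example-heap");
	}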
index fa3b3436556027aa587cd6d2042c00ba147ef53a..a2fad11894ffd16e5640ad3501eed3aca4b60438 100644 (file)
@@ -306,6 +306,7 @@ enum rtattr_type_t {
        RTA_TABLE,
        RTA_MARK,
        RTA_MFC_STATS,
+       RTA_UID,
        RTA_VIA,
        RTA_NEWDST,
        RTA_PREF,
index 49230d36f9ce783011bd23edb19ba8be0fb751f1..84e66ed670be13611f2a20e9d77ef3b34dfe6ed8 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 
 #define SOCK_DIAG_BY_FAMILY 20
+#define SOCK_DESTROY_BACKPORT 21
 
 struct sock_diag_req {
        __u8    sdiag_family;
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644 (file)
index 0000000..0baeb7d
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *     requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_GET_PROTOCOL
+ *     value:          0
+ *     index:          0
+ *     data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_STRING
+ *     value:          0
+ *     index:          string ID
+ *     data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_START
+ *     value:          0
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          total length of the HID report descriptor
+ *     data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_UNREGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_HID_REPORT_DESC
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *     data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_HID_EVENT
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_AUDIO_MODE
+ *     value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
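On the device side, these ioctls are issued against the accessory gadget's character device once the host has switched the device into accessory mode. A hedged sketch; the /dev/usb_accessory node name follows the Android userspace convention and is an assumption here:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/usb/f_accessory.h>

	static int example_read_model(char model[256])
	{
		int fd = open("/dev/usb_accessory", O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		/* on success, model holds the string the host sent via ACCESSORY_SEND_STRING */
		ret = ioctl(fd, ACCESSORY_GET_STRING_MODEL, model);
		close(fd);
		return ret;
	}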
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644 (file)
index 0000000..5032918
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+       /* file descriptor for file to transfer */
+       int                     fd;
+       /* offset in file for start of transfer */
+       loff_t          offset;
+       /* number of bytes to transfer */
+       int64_t         length;
+       /* MTP command ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint16_t        command;
+       /* MTP transaction ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint32_t        transaction_id;
+};
+
+struct mtp_event {
+       /* size of the event */
+       size_t          length;
+       /* event data to send */
+       void            *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
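Userspace drives a transfer by filling in an mtp_file_range and handing it to the gadget driver, which streams the file contents to the USB endpoint without bouncing them through the caller. A hedged sketch; the caller is assumed to hold an fd for the MTP gadget device (conventionally /dev/mtp_usb on Android, an assumption here):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/types.h>
	#include <linux/usb/f_mtp.h>

	static int example_send_object(int mtp_fd, int file_fd, int64_t size,
				       uint16_t command, uint32_t transaction_id)
	{
		struct mtp_file_range mfr = {
			.fd             = file_fd,
			.offset         = 0,
			.length         = size,
			.command        = command,	/* used only by the _WITH_HEADER variant */
			.transaction_id = transaction_id,
		};

		/* the kernel prepends the 12-byte MTP data header, then streams the file */
		return ioctl(mtp_fd, MTP_SEND_FILE_WITH_HEADER, &mfr);
	}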
diff --git a/include/uapi/video/adf.h b/include/uapi/video/adf.h
new file mode 100644 (file)
index 0000000..c5d2e62
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_VIDEO_ADF_H_
+#define _UAPI_VIDEO_ADF_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_mode.h>
+
+#define ADF_NAME_LEN 32
+#define ADF_MAX_CUSTOM_DATA_SIZE 4096
+
+enum adf_interface_type {
+       ADF_INTF_DSI = 0,
+       ADF_INTF_eDP = 1,
+       ADF_INTF_DPI = 2,
+       ADF_INTF_VGA = 3,
+       ADF_INTF_DVI = 4,
+       ADF_INTF_HDMI = 5,
+       ADF_INTF_MEMORY = 6,
+       ADF_INTF_TYPE_DEVICE_CUSTOM = 128,
+       ADF_INTF_TYPE_MAX = (~(__u32)0),
+};
+
+#define ADF_INTF_FLAG_PRIMARY (1 << 0)
+#define ADF_INTF_FLAG_EXTERNAL (1 << 1)
+
+enum adf_event_type {
+       ADF_EVENT_VSYNC = 0,
+       ADF_EVENT_HOTPLUG = 1,
+       ADF_EVENT_DEVICE_CUSTOM = 128,
+       ADF_EVENT_TYPE_MAX = 255,
+};
+
+/**
+ * struct adf_set_event - start or stop subscribing to ADF events
+ *
+ * @type: the type of event to (un)subscribe
+ * @enabled: subscribe or unsubscribe
+ *
+ * After subscribing to an event, userspace may poll() the ADF object's fd
+ * to wait for events or read() to consume the event's data.
+ *
+ * ADF reserves event types 0 to %ADF_EVENT_DEVICE_CUSTOM-1 for its own events.
+ * Devices may use event types %ADF_EVENT_DEVICE_CUSTOM to %ADF_EVENT_TYPE_MAX-1
+ * for driver-private events.
+ */
+struct adf_set_event {
+       __u8 type;
+       __u8 enabled;
+};
+
+/**
+ * struct adf_event - common header for ADF event data
+ *
+ * @type: event type
+ * @length: total size of event data, header inclusive
+ */
+struct adf_event {
+       __u8 type;
+       __u32 length;
+};
+
+/**
+ * struct adf_vsync_event - ADF vsync event
+ *
+ * @base: event header (see &struct adf_event)
+ * @timestamp: time of vsync event, in nanoseconds
+ */
+struct adf_vsync_event {
+       struct adf_event base;
+       __aligned_u64 timestamp;
+};
+
+/**
+ * struct adf_hotplug_event - ADF display hotplug event
+ *
+ * @base: event header (see &struct adf_event)
+ * @connected: whether a display is now connected to the interface
+ */
+struct adf_hotplug_event {
+       struct adf_event base;
+       __u8 connected;
+};
+
+#define ADF_MAX_PLANES 4
+/**
+ * struct adf_buffer_config - description of buffer displayed by adf_post_config
+ *
+ * @overlay_engine: id of the target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @fd: dma_buf fd for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: stride (i.e. length of a scanline including padding) in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence fd which will clear when the buffer is
+ *     ready for display, or <0 if the buffer is already ready
+ */
+struct adf_buffer_config {
+       __u32 overlay_engine;
+
+       __u32 w;
+       __u32 h;
+       __u32 format;
+
+       __s32 fd[ADF_MAX_PLANES];
+       __u32 offset[ADF_MAX_PLANES];
+       __u32 pitch[ADF_MAX_PLANES];
+       __u8 n_planes;
+
+       __s32 acquire_fence;
+};
+#define ADF_MAX_BUFFERS (4096 / sizeof(struct adf_buffer_config))
+
+/**
+ * struct adf_post_config - request to flip to a new set of buffers
+ *
+ * @n_interfaces: number of interfaces targeted by the flip (input)
+ * @interfaces: ids of interfaces targeted by the flip (input)
+ * @n_bufs: number of buffers displayed (input)
+ * @bufs: description of buffers displayed (input)
+ * @custom_data_size: size of driver-private data (input)
+ * @custom_data: driver-private data (input)
+ * @complete_fence: sync_fence fd which will clear when this
+ *     configuration has left the screen (output)
+ */
+struct adf_post_config {
+       size_t n_interfaces;
+       __u32 __user *interfaces;
+
+       size_t n_bufs;
+       struct adf_buffer_config __user *bufs;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+
+       __s32 complete_fence;
+};
+#define ADF_MAX_INTERFACES (4096 / sizeof(__u32))
+
+/**
+ * struct adf_simple_buffer_alloc - request to allocate a "simple" buffer
+ *
+ * @w: width of buffer in pixels (input)
+ * @h: height of buffer in pixels (input)
+ * @format: DRM-style fourcc (input)
+ *
+ * @fd: dma_buf fd (output)
+ * @offset: location of first pixel, in bytes (output)
+ * @pitch: length of a scanline including padding, in bytes (output)
+ *
+ * Simple buffers are analogous to DRM's "dumb" buffers.  They have a single
+ * plane of linear RGB data which can be allocated and scanned out without
+ * any driver-private ioctls or data.
+ *
+ * @format must be a standard RGB format defined in drm_fourcc.h.
+ *
+ * ADF clients must NOT assume that an interface can scan out a simple buffer
+ * allocated by a different ADF interface, even if the two interfaces belong to
+ * the same ADF device.
+ */
+struct adf_simple_buffer_alloc {
+       __u16 w;
+       __u16 h;
+       __u32 format;
+
+       __s32 fd;
+       __u32 offset;
+       __u32 pitch;
+};
+
+/**
+ * struct adf_simple_post_config - request to flip to a single buffer without
+ * driver-private data
+ *
+ * @buf: description of buffer displayed (input)
+ * @complete_fence: sync_fence fd which will clear when this buffer has left the
+ * screen (output)
+ */
+struct adf_simple_post_config {
+       struct adf_buffer_config buf;
+       __s32 complete_fence;
+};
+
+/**
+ * struct adf_attachment_config - description of attachment between an overlay
+ * engine and an interface
+ *
+ * @overlay_engine: id of the overlay engine
+ * @interface: id of the interface
+ */
+struct adf_attachment_config {
+       __u32 overlay_engine;
+       __u32 interface;
+};
+
+/**
+ * struct adf_device_data - describes a display device
+ *
+ * @name: display device's name
+ * @n_attachments: the number of current attachments
+ * @attachments: list of current attachments
+ * @n_allowed_attachments: the number of allowed attachments
+ * @allowed_attachments: list of allowed attachments
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_device_data {
+       char name[ADF_NAME_LEN];
+
+       size_t n_attachments;
+       struct adf_attachment_config __user *attachments;
+
+       size_t n_allowed_attachments;
+       struct adf_attachment_config __user *allowed_attachments;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_ATTACHMENTS (4096 / sizeof(struct adf_attachment_config))
+
+/**
+ * struct adf_interface_data - describes a display interface
+ *
+ * @name: display interface's name
+ * @type: interface type (see enum @adf_interface_type)
+ * @id: which interface of type @type;
+ *     e.g. interface DSI.1 -> @type=@ADF_INTF_TYPE_DSI, @id=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @dpms_state: DPMS state (one of @DRM_MODE_DPMS_* defined in drm_mode.h)
+ * @hotplug_detect: whether a display is plugged in
+ * @width_mm: screen width in millimeters, or 0 if unknown
+ * @height_mm: screen height in millimeters, or 0 if unknown
+ * @current_mode: current display mode
+ * @n_available_modes: the number of hardware display modes
+ * @available_modes: list of hardware display modes
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_interface_data {
+       char name[ADF_NAME_LEN];
+
+       __u32 type;
+       __u32 id;
+       /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+       __u32 flags;
+
+       __u8 dpms_state;
+       __u8 hotplug_detect;
+       __u16 width_mm;
+       __u16 height_mm;
+
+       struct drm_mode_modeinfo current_mode;
+       size_t n_available_modes;
+       struct drm_mode_modeinfo __user *available_modes;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_MODES (4096 / sizeof(struct drm_mode_modeinfo))
+
+/**
+ * struct adf_overlay_engine_data - describes an overlay engine
+ *
+ * @name: overlay engine's name
+ * @n_supported_formats: number of supported formats
+ * @supported_formats: list of supported formats
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_overlay_engine_data {
+       char name[ADF_NAME_LEN];
+
+       size_t n_supported_formats;
+       __u32 __user *supported_formats;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_SUPPORTED_FORMATS (4096 / sizeof(__u32))
+
+#define ADF_IOCTL_TYPE         'D'
+#define ADF_IOCTL_NR_CUSTOM    128
+
+#define ADF_SET_EVENT          _IOW(ADF_IOCTL_TYPE, 0, struct adf_set_event)
+#define ADF_BLANK              _IOW(ADF_IOCTL_TYPE, 1, __u8)
+#define ADF_POST_CONFIG                _IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config)
+#define ADF_SET_MODE           _IOW(ADF_IOCTL_TYPE, 3, \
+                                       struct drm_mode_modeinfo)
+#define ADF_GET_DEVICE_DATA    _IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data)
+#define ADF_GET_INTERFACE_DATA _IOR(ADF_IOCTL_TYPE, 5, \
+                                       struct adf_interface_data)
+#define ADF_GET_OVERLAY_ENGINE_DATA \
+                               _IOR(ADF_IOCTL_TYPE, 6, \
+                                       struct adf_overlay_engine_data)
+#define ADF_SIMPLE_POST_CONFIG _IOW(ADF_IOCTL_TYPE, 7, \
+                                       struct adf_simple_post_config)
+#define ADF_SIMPLE_BUFFER_ALLOC        _IOW(ADF_IOCTL_TYPE, 8, \
+                                       struct adf_simple_buffer_alloc)
+#define ADF_ATTACH             _IOW(ADF_IOCTL_TYPE, 9, \
+                                       struct adf_attachment_config)
+#define ADF_DETACH             _IOW(ADF_IOCTL_TYPE, 10, \
+                                       struct adf_attachment_config)
+
+#endif /* _UAPI_VIDEO_ADF_H_ */
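A minimal userspace sketch of the simple-buffer path defined above, allocating one linear XRGB buffer through ADF_SIMPLE_BUFFER_ALLOC. Only the struct fields and ioctl number come from this header; the interface node path, the 1280x720 size, and the error handling are illustrative assumptions.

        /* Illustrative only: allocate a "simple" buffer on an ADF interface. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <drm/drm_fourcc.h>     /* DRM_FORMAT_XRGB8888 */
        #include <video/adf.h>          /* installed copy of the UAPI header above */

        int main(void)
        {
                struct adf_simple_buffer_alloc req;
                /* assumed node name; the real name depends on the driver/udev */
                int fd = open("/dev/adf-interface0.0", O_RDWR);

                if (fd < 0)
                        return 1;

                memset(&req, 0, sizeof(req));
                req.w = 1280;                           /* input: width in pixels */
                req.h = 720;                            /* input: height in pixels */
                req.format = DRM_FORMAT_XRGB8888;       /* input: standard RGB fourcc */

                if (ioctl(fd, ADF_SIMPLE_BUFFER_ALLOC, &req) < 0) {
                        close(fd);
                        return 1;
                }

                /* outputs: dma-buf fd plus the layout needed to fill the buffer */
                printf("dma-buf fd=%d offset=%u pitch=%u\n",
                       req.fd, req.offset, req.pitch);

                close(req.fd);
                close(fd);
                return 0;
        }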
diff --git a/include/video/adf.h b/include/video/adf.h
new file mode 100644 (file)
index 0000000..34f10e5
--- /dev/null
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_H
+#define _VIDEO_ADF_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/video/adf.h>
+#include "sync.h"
+
+struct adf_obj;
+struct adf_obj_ops;
+struct adf_device;
+struct adf_device_ops;
+struct adf_interface;
+struct adf_interface_ops;
+struct adf_overlay_engine;
+struct adf_overlay_engine_ops;
+
+/**
+ * struct adf_buffer - buffer displayed by adf_post
+ *
+ * @overlay_engine: target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @dma_bufs: dma_buf for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: length of a scanline including padding, in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence which will clear when the buffer is
+ *     ready for display
+ *
+ * &struct adf_buffer is the in-kernel counterpart to the userspace-facing
+ * &struct adf_buffer_config.
+ */
+struct adf_buffer {
+       struct adf_overlay_engine *overlay_engine;
+
+       u32 w;
+       u32 h;
+       u32 format;
+
+       struct dma_buf *dma_bufs[ADF_MAX_PLANES];
+       u32 offset[ADF_MAX_PLANES];
+       u32 pitch[ADF_MAX_PLANES];
+       u8 n_planes;
+
+       struct sync_fence *acquire_fence;
+};
+
+/**
+ * struct adf_buffer_mapping - state for mapping a &struct adf_buffer into the
+ * display device
+ *
+ * @attachments: dma-buf attachment for each plane
+ * @sg_tables: SG tables for each plane
+ */
+struct adf_buffer_mapping {
+       struct dma_buf_attachment *attachments[ADF_MAX_PLANES];
+       struct sg_table *sg_tables[ADF_MAX_PLANES];
+};
+
+/**
+ * struct adf_post - request to flip to a new set of buffers
+ *
+ * @n_bufs: number of buffers displayed
+ * @bufs: buffers displayed
+ * @mappings: in-device mapping state for each buffer
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ *
+ * &struct adf_post is the in-kernel counterpart to the userspace-facing
+ * &struct adf_post_config.
+ */
+struct adf_post {
+       size_t n_bufs;
+       struct adf_buffer *bufs;
+       struct adf_buffer_mapping *mappings;
+
+       size_t custom_data_size;
+       void *custom_data;
+};
+
+/**
+ * struct adf_attachment - description of attachment between an overlay engine
+ * and an interface
+ *
+ * @overlay_engine: the overlay engine
+ * @interface: the interface
+ *
+ * &struct adf_attachment is the in-kernel counterpart to the userspace-facing
+ * &struct adf_attachment_config.
+ */
+struct adf_attachment {
+       struct adf_overlay_engine *overlay_engine;
+       struct adf_interface *interface;
+};
+
+struct adf_pending_post {
+       struct list_head head;
+       struct adf_post config;
+       void *state;
+};
+
+enum adf_obj_type {
+       ADF_OBJ_OVERLAY_ENGINE = 0,
+       ADF_OBJ_INTERFACE = 1,
+       ADF_OBJ_DEVICE = 2,
+};
+
+/**
+ * struct adf_obj_ops - common ADF object implementation ops
+ *
+ * @open: handle opening the object's device node
+ * @release: handle releasing an open file
+ * @ioctl: handle custom ioctls
+ *
+ * @supports_event: return whether the object supports generating events of type
+ *     @type
+ * @set_event: enable or disable events of type @type
+ * @event_type_str: return a string representation of custom event @type
+ *     (@type >= %ADF_EVENT_DEVICE_CUSTOM).
+ *
+ * @custom_data: copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes of driver-private
+ *     data into @data (allocated by ADF) and return the number of copied bytes
+ *     in @size.  Return 0 on success or an error code (<0) on failure.
+ */
+struct adf_obj_ops {
+       /* optional */
+       int (*open)(struct adf_obj *obj, struct inode *inode,
+                       struct file *file);
+       /* optional */
+       void (*release)(struct adf_obj *obj, struct inode *inode,
+                       struct file *file);
+       /* optional */
+       long (*ioctl)(struct adf_obj *obj, unsigned int cmd, unsigned long arg);
+
+       /* optional */
+       bool (*supports_event)(struct adf_obj *obj, enum adf_event_type type);
+       /* required if supports_event is implemented */
+       void (*set_event)(struct adf_obj *obj, enum adf_event_type type,
+                       bool enabled);
+       /* optional */
+       const char *(*event_type_str)(struct adf_obj *obj,
+                       enum adf_event_type type);
+
+       /* optional */
+       int (*custom_data)(struct adf_obj *obj, void *data, size_t *size);
+};
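As a sketch of the event hooks described above, a hypothetical interface driver that only generates vsync events might fill the two related callbacks like this; the mydrv_* names and the omitted interrupt programming are assumptions, not part of the patch.

        /* Hypothetical sketch: an object that only generates vsync events. */
        static bool mydrv_intf_supports_event(struct adf_obj *obj,
                        enum adf_event_type type)
        {
                return type == ADF_EVENT_VSYNC;
        }

        static void mydrv_intf_set_event(struct adf_obj *obj,
                        enum adf_event_type type, bool enabled)
        {
                /* required because supports_event is implemented: enable or
                 * disable the vsync interrupt in hardware (programming omitted) */
        }

        static const struct adf_obj_ops mydrv_intf_obj_ops = {
                .supports_event = mydrv_intf_supports_event,
                .set_event      = mydrv_intf_set_event,
        };

An adf_obj_ops instance like this would typically be embedded as the .base member of the interface or device ops shown further below.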
+
+struct adf_obj {
+       enum adf_obj_type type;
+       char name[ADF_NAME_LEN];
+       struct adf_device *parent;
+
+       const struct adf_obj_ops *ops;
+
+       struct device dev;
+
+       struct spinlock file_lock;
+       struct list_head file_list;
+
+       struct mutex event_lock;
+       struct rb_root event_refcount;
+
+       int id;
+       int minor;
+};
+
+/**
+ * struct adf_device_quirks - common display device quirks
+ *
+ * @buffer_padding: whether the last scanline of a buffer extends to the
+ *     buffer's pitch (@ADF_BUFFER_PADDED_TO_PITCH) or just to the visible
+ *     width (@ADF_BUFFER_UNPADDED)
+ */
+struct adf_device_quirks {
+       /* optional, defaults to ADF_BUFFER_PADDED_TO_PITCH */
+       enum {
+               ADF_BUFFER_PADDED_TO_PITCH = 0,
+               ADF_BUFFER_UNPADDED = 1,
+       } buffer_padding;
+};
+
+/**
+ * struct adf_device_ops - display device implementation ops
+ *
+ * @owner: device's module
+ * @base: common operations (see &struct adf_obj_ops)
+ * @quirks: device's quirks (see &struct adf_device_quirks)
+ *
+ * @attach: attach overlay engine @eng to interface @intf.  Return 0 on success
+ *     or error code (<0) on failure.
+ * @detach: detach overlay engine @eng from interface @intf.  Return 0 on
+ *     success or error code (<0) on failure.
+ *
+ * @validate_custom_format: validate the number and size of planes
+ *     in buffers with a custom format (i.e., not one of the @DRM_FORMAT_*
+ *     types defined in drm/drm_fourcc.h).  Return 0 if the buffer is valid or
+ *     an error code (<0) otherwise.
+ *
+ * @validate: validate that the proposed configuration @cfg is legal.  The
+ *     driver may optionally allocate and return some driver-private state in
+ *     @driver_state, which will be passed to the corresponding post().  The
+ *     driver may NOT commit any changes to hardware.  Return 0 if @cfg is
+ *     valid or an error code (<0) otherwise.
+ * @complete_fence: create a hardware-backed sync fence to be signaled when
+ *     @cfg is removed from the screen.  If unimplemented, ADF automatically
+ *     creates an sw_sync fence.  Return the sync fence on success or a
+ *     PTR_ERR() on failure.
+ * @post: flip @cfg onto the screen.  Wait for the display to begin scanning out
+ *     @cfg before returning.
+ * @advance_timeline: signal the sync fence for the last configuration to leave
+ *     the display.  If unimplemented, ADF automatically advances an sw_sync
+ *     timeline.
+ * @state_free: free driver-private state allocated during validate()
+ */
+struct adf_device_ops {
+       /* required */
+       struct module *owner;
+       const struct adf_obj_ops base;
+       /* optional */
+       const struct adf_device_quirks quirks;
+
+       /* optional */
+       int (*attach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+                       struct adf_interface *intf);
+       /* optional */
+       int (*detach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+                       struct adf_interface *intf);
+
+       /* required if any of the device's overlay engines supports at least one
+          custom format */
+       int (*validate_custom_format)(struct adf_device *dev,
+                       struct adf_buffer *buf);
+
+       /* required */
+       int (*validate)(struct adf_device *dev, struct adf_post *cfg,
+                       void **driver_state);
+       /* optional */
+       struct sync_fence *(*complete_fence)(struct adf_device *dev,
+                       struct adf_post *cfg, void *driver_state);
+       /* required */
+       void (*post)(struct adf_device *dev, struct adf_post *cfg,
+                       void *driver_state);
+       /* required if complete_fence is implemented */
+       void (*advance_timeline)(struct adf_device *dev,
+                       struct adf_post *cfg, void *driver_state);
+       /* required if validate allocates driver state */
+       void (*state_free)(struct adf_device *dev, void *driver_state);
+};
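A hypothetical skeleton of the two required device ops plus the owner/base fields; the mydrv_* names and the empty hardware-programming bodies are assumptions rather than part of the patch.

        /* Hypothetical skeleton of the required struct adf_device_ops members. */
        static int mydrv_validate(struct adf_device *dev, struct adf_post *cfg,
                        void **driver_state)
        {
                /* inspect cfg->bufs / cfg->custom_data; must not touch hardware */
                return 0;
        }

        static void mydrv_post(struct adf_device *dev, struct adf_post *cfg,
                        void *driver_state)
        {
                /* program the hardware and wait until it starts scanning out cfg */
        }

        static const struct adf_device_ops mydrv_dev_ops = {
                .owner    = THIS_MODULE,
                .base     = { /* optional common hooks, see struct adf_obj_ops */ },
                .validate = mydrv_validate,
                .post     = mydrv_post,
        };

Because neither complete_fence nor advance_timeline is provided here, ADF would fall back to its internal sw_sync timeline as described in the comment above.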
+
+struct adf_attachment_list {
+       struct adf_attachment attachment;
+       struct list_head head;
+};
+
+struct adf_device {
+       struct adf_obj base;
+       struct device *dev;
+
+       const struct adf_device_ops *ops;
+
+       struct mutex client_lock;
+
+       struct idr interfaces;
+       size_t n_interfaces;
+       struct idr overlay_engines;
+
+       struct list_head post_list;
+       struct mutex post_lock;
+       struct kthread_worker post_worker;
+       struct task_struct *post_thread;
+       struct kthread_work post_work;
+
+       struct list_head attached;
+       size_t n_attached;
+       struct list_head attach_allowed;
+       size_t n_attach_allowed;
+
+       struct adf_pending_post *onscreen;
+
+       struct sw_sync_timeline *timeline;
+       int timeline_max;
+};
+
+/**
+ * struct adf_interface_ops - display interface implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @blank: change the display's DPMS state.  Return 0 on success or error
+ *     code (<0) on failure.
+ *
+ * @alloc_simple_buffer: allocate a buffer with the specified @w, @h, and
+ *     @format.  @format will be a standard RGB format (i.e.,
+ *     adf_format_is_rgb(@format) == true).  Return 0 on success or error code
+ *     (<0) on failure.  On success, return the buffer, offset, and pitch in
+ *     @dma_buf, @offset, and @pitch respectively.
+ * @describe_simple_post: provide driver-private data needed to post a single
+ *     buffer @buf.  Copy up to ADF_MAX_CUSTOM_DATA_SIZE bytes into @data
+ *     (allocated by ADF) and return the number of bytes in @size.  Return 0 on
+ *     success or error code (<0) on failure.
+ *
+ * @modeset: change the interface's mode.  @mode is not necessarily part of the
+ *     modelist passed to adf_hotplug_notify_connected(); the driver may
+ *     accept or reject custom modes at its discretion.  Return 0 on success or
+ *     error code (<0) if the mode could not be set.
+ *
+ * @screen_size: copy the screen dimensions in millimeters into @width_mm
+ *     and @height_mm.  Return 0 on success or error code (<0) if the display
+ *     dimensions are unknown.
+ *
+ * @type_str: return a string representation of custom @intf->type
+ *     (@intf->type >= @ADF_INTF_TYPE_DEVICE_CUSTOM).
+ */
+struct adf_interface_ops {
+       const struct adf_obj_ops base;
+
+       /* optional */
+       int (*blank)(struct adf_interface *intf, u8 state);
+
+       /* optional */
+       int (*alloc_simple_buffer)(struct adf_interface *intf,
+                       u16 w, u16 h, u32 format,
+                       struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+       /* optional */
+       int (*describe_simple_post)(struct adf_interface *intf,
+                       struct adf_buffer *fb, void *data, size_t *size);
+
+       /* optional */
+       int (*modeset)(struct adf_interface *intf,
+                       struct drm_mode_modeinfo *mode);
+
+       /* optional */
+       int (*screen_size)(struct adf_interface *intf, u16 *width_mm,
+                       u16 *height_mm);
+
+       /* optional */
+       const char *(*type_str)(struct adf_interface *intf);
+};
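On the interface side, a comparable sketch might implement only the optional blank() hook; DRM_MODE_DPMS_ON/OFF come from drm_mode.h, and the empty power-sequencing bodies and mydrv_* names are assumptions.

        /* Hypothetical interface ops supporting only DPMS on/off. */
        static int mydrv_intf_blank(struct adf_interface *intf, u8 state)
        {
                switch (state) {
                case DRM_MODE_DPMS_ON:
                        /* panel power-up sequence omitted */
                        return 0;
                case DRM_MODE_DPMS_OFF:
                        /* panel power-down sequence omitted */
                        return 0;
                default:
                        /* STANDBY/SUSPEND not supported by this hypothetical panel */
                        return -EINVAL;
                }
        }

        static const struct adf_interface_ops mydrv_intf_ops = {
                .base  = { /* e.g. the vsync event hooks sketched earlier */ },
                .blank = mydrv_intf_blank,
        };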
+
+struct adf_interface {
+       struct adf_obj base;
+       const struct adf_interface_ops *ops;
+
+       struct drm_mode_modeinfo current_mode;
+
+       enum adf_interface_type type;
+       u32 idx;
+       u32 flags;
+
+       wait_queue_head_t vsync_wait;
+       ktime_t vsync_timestamp;
+       rwlock_t vsync_lock;
+
+       u8 dpms_state;
+
+       bool hotplug_detect;
+       struct drm_mode_modeinfo *modelist;
+       size_t n_modes;
+       rwlock_t hotplug_modelist_lock;
+};
+
+/**
+ * struct adf_overlay_engine_ops - overlay engine implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @supported_formats: list of fourccs the overlay engine can scan out
+ * @n_supported_formats: length of supported_formats, up to
+ *     ADF_MAX_SUPPORTED_FORMATS
+ */
+struct adf_overlay_engine_ops {
+       const struct adf_obj_ops base;
+
+       /* required */
+       const u32 *supported_formats;
+       /* required */
+       const size_t n_supported_formats;
+};
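A hypothetical overlay engine definition filling the two required fields above; the single-format list and the mydrv_ prefix are assumptions, and DRM_FORMAT_XRGB8888 is the standard 32-bit RGB fourcc from drm_fourcc.h.

        /* Hypothetical overlay engine that can only scan out 32-bit XRGB. */
        static const u32 mydrv_formats[] = {
                DRM_FORMAT_XRGB8888,    /* from <uapi/drm/drm_fourcc.h> */
        };

        static const struct adf_overlay_engine_ops mydrv_eng_ops = {
                .base                = { /* no custom obj ops needed */ },
                .supported_formats   = mydrv_formats,
                .n_supported_formats = ARRAY_SIZE(mydrv_formats),
        };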
+
+struct adf_overlay_engine {
+       struct adf_obj base;
+
+       const struct adf_overlay_engine_ops *ops;
+};
+
+#define adf_obj_to_device(ptr) \
+       container_of((ptr), struct adf_device, base)
+
+#define adf_obj_to_interface(ptr) \
+       container_of((ptr), struct adf_interface, base)
+
+#define adf_obj_to_overlay_engine(ptr) \
+       container_of((ptr), struct adf_overlay_engine, base)
+
+int __printf(4, 5) adf_device_init(struct adf_device *dev,
+               struct device *parent, const struct adf_device_ops *ops,
+               const char *fmt, ...);
+void adf_device_destroy(struct adf_device *dev);
+int __printf(7, 8) adf_interface_init(struct adf_interface *intf,
+               struct adf_device *dev, enum adf_interface_type type, u32 idx,
+               u32 flags, const struct adf_interface_ops *ops, const char *fmt,
+               ...);
+void adf_interface_destroy(struct adf_interface *intf);
+static inline struct adf_device *adf_interface_parent(
+               struct adf_interface *intf)
+{
+       return intf->base.parent;
+}
+int __printf(4, 5) adf_overlay_engine_init(struct adf_overlay_engine *eng,
+               struct adf_device *dev,
+               const struct adf_overlay_engine_ops *ops, const char *fmt, ...);
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng);
+static inline struct adf_device *adf_overlay_engine_parent(
+               struct adf_overlay_engine *eng)
+{
+       return eng->base.parent;
+}
+
+int adf_attachment_allow(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
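Tying the pieces together, a probe path for a hypothetical driver could register one device, one interface, and one overlay engine and then allow their attachment. The mydrv_* ops refer to the sketches above; ADF_INTF_DSI and ADF_INTF_FLAG_PRIMARY are assumed to be the uapi enum/flag values for a primary DSI panel.

        /* Hypothetical probe-time registration using the helpers declared above. */
        static struct adf_device mydrv_adf_dev;
        static struct adf_interface mydrv_intf;
        static struct adf_overlay_engine mydrv_eng;

        static int mydrv_register_adf(struct device *parent)
        {
                int err;

                err = adf_device_init(&mydrv_adf_dev, parent, &mydrv_dev_ops,
                                "mydrv");
                if (err)
                        return err;

                err = adf_interface_init(&mydrv_intf, &mydrv_adf_dev,
                                ADF_INTF_DSI, 0, ADF_INTF_FLAG_PRIMARY,
                                &mydrv_intf_ops, "mydrv-dsi");
                if (err)
                        goto err_intf;

                err = adf_overlay_engine_init(&mydrv_eng, &mydrv_adf_dev,
                                &mydrv_eng_ops, "mydrv-blender");
                if (err)
                        goto err_eng;

                err = adf_attachment_allow(&mydrv_adf_dev, &mydrv_eng, &mydrv_intf);
                if (err)
                        goto err_attach;

                return 0;

        err_attach:
                adf_overlay_engine_destroy(&mydrv_eng);
        err_eng:
                adf_interface_destroy(&mydrv_intf);
        err_intf:
                adf_device_destroy(&mydrv_adf_dev);
                return err;
        }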
+
+const char *adf_obj_type_str(enum adf_obj_type type);
+const char *adf_interface_type_str(struct adf_interface *intf);
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type);
+
+#define ADF_FORMAT_STR_SIZE 5
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE]);
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+               u8 num_planes, u8 hsub, u8 vsub, u8 cpp[]);
+/**
+ * adf_format_validate_rgb - validate the number and size of planes in buffers
+ * with a custom RGB format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @cpp: expected bytes per pixel
+ *
+ * adf_format_validate_rgb() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.  @buf must have a single RGB plane.
+ *
+ * Returns 0 if @buf has a single plane with sufficient size, or -EINVAL
+ * otherwise.
+ */
+static inline int adf_format_validate_rgb(struct adf_device *dev,
+               struct adf_buffer *buf, u8 cpp)
+{
+       return adf_format_validate_yuv(dev, buf, 1, 1, 1, &cpp);
+}
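For instance, a driver whose only non-standard format is a single-plane 32-bit RGB layout could make its validate_custom_format() op a one-line wrapper around this helper (the mydrv_ name is an assumption):

        static int mydrv_validate_custom_format(struct adf_device *dev,
                        struct adf_buffer *buf)
        {
                return adf_format_validate_rgb(dev, buf, 4 /* bytes per pixel */);
        }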
+
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event);
+
+static inline void adf_vsync_get(struct adf_interface *intf)
+{
+       adf_event_get(&intf->base, ADF_EVENT_VSYNC);
+}
+
+static inline void adf_vsync_put(struct adf_interface *intf)
+{
+       adf_event_put(&intf->base, ADF_EVENT_VSYNC);
+}
+
+int adf_vsync_wait(struct adf_interface *intf, long timeout);
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp);
+
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes);
+void adf_hotplug_notify_disconnected(struct adf_interface *intf);
+
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode);
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode);
+
+#endif /* _VIDEO_ADF_H */
diff --git a/include/video/adf_client.h b/include/video/adf_client.h
new file mode 100644 (file)
index 0000000..983f2b6
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_CLIENT_H_
+#define _VIDEO_ADF_CLIENT_H_
+
+#include <video/adf.h>
+
+int adf_interface_blank(struct adf_interface *intf, u8 state);
+u8 adf_interface_dpms_state(struct adf_interface *intf);
+
+void adf_interface_current_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode);
+size_t adf_interface_modelist(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes);
+int adf_interface_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode);
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width,
+               u16 *height);
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+               u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+               struct adf_buffer *buf);
+
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+               u32 format);
+
+size_t adf_device_attachments(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments);
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments);
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+bool adf_device_attach_allowed(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+
+struct sync_fence *adf_device_post(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size);
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size);
+
+#endif /* _VIDEO_ADF_CLIENT_H_ */
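An in-kernel client sketch combining the helpers above: allocate a simple buffer on an interface, wrap it in a struct adf_buffer, and post it. The intf/eng pointers, the format choice, the ERR_PTR-style return from adf_interface_simple_post(), and the use of sync_fence_put() from the sync framework are assumptions.

        /* Hypothetical in-kernel client: allocate, post, and drop the fence.
         * Assumes #include <video/adf_client.h> and <uapi/drm/drm_fourcc.h>. */
        static int mydrv_show_one_frame(struct adf_interface *intf,
                        struct adf_overlay_engine *eng)
        {
                struct adf_buffer buf;
                struct sync_fence *fence;
                struct dma_buf *dma_buf;
                u32 offset, pitch;
                int err;

                err = adf_interface_simple_buffer_alloc(intf, 1280, 720,
                                DRM_FORMAT_XRGB8888, &dma_buf, &offset, &pitch);
                if (err)
                        return err;

                memset(&buf, 0, sizeof(buf));
                buf.overlay_engine = eng;
                buf.w = 1280;
                buf.h = 720;
                buf.format = DRM_FORMAT_XRGB8888;
                buf.dma_bufs[0] = dma_buf;
                buf.offset[0] = offset;
                buf.pitch[0] = pitch;
                buf.n_planes = 1;
                buf.acquire_fence = NULL;       /* buffer contents already ready */

                fence = adf_interface_simple_post(intf, &buf);
                if (IS_ERR(fence)) {
                        dma_buf_put(dma_buf);
                        return PTR_ERR(fence);
                }

                /* the fence signals once this buffer has left the screen */
                sync_fence_put(fence);
                dma_buf_put(dma_buf);
                return 0;
        }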
diff --git a/include/video/adf_fbdev.h b/include/video/adf_fbdev.h
new file mode 100644 (file)
index 0000000..b722c6b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FBDEV_H_
+#define _VIDEO_ADF_FBDEV_H_
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <video/adf.h>
+
+struct adf_fbdev {
+       struct adf_interface *intf;
+       struct adf_overlay_engine *eng;
+       struct fb_info *info;
+       u32 pseudo_palette[16];
+
+       unsigned int refcount;
+       struct mutex refcount_lock;
+
+       struct dma_buf *dma_buf;
+       u32 offset;
+       u32 pitch;
+       void *vaddr;
+       u32 format;
+
+       u16 default_xres_virtual;
+       u16 default_yres_virtual;
+       u32 default_format;
+};
+
+#if IS_ENABLED(CONFIG_ADF_FBDEV)
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode);
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode);
+
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...);
+void adf_fbdev_destroy(struct adf_fbdev *fbdev);
+
+int adf_fbdev_open(struct fb_info *info, int user);
+int adf_fbdev_release(struct fb_info *info, int user);
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_set_par(struct fb_info *info);
+int adf_fbdev_blank(int blank, struct fb_info *info);
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
+#else
+static inline void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode)
+{
+       WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode)
+{
+       WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline int adf_fbdev_init(struct adf_fbdev *fbdev,
+               struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...)
+{
+       return -ENODEV;
+}
+
+static inline void adf_fbdev_destroy(struct adf_fbdev *fbdev) { }
+
+static inline int adf_fbdev_open(struct fb_info *info, int user)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_release(struct fb_info *info, int user)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_check_var(struct fb_var_screeninfo *var,
+               struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_set_par(struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_pan_display(struct fb_var_screeninfo *var,
+               struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_mmap(struct fb_info *info,
+               struct vm_area_struct *vma)
+{
+       return -ENODEV;
+}
+#endif
+
+#endif /* _VIDEO_ADF_FBDEV_H_ */
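When CONFIG_ADF_FBDEV is enabled, the helpers above are designed to slot straight into a struct fb_ops; a hypothetical wiring might look like this (the mydrv_fb_ops name is an assumption).

        /* Hypothetical fb_ops built entirely from the ADF fbdev helpers. */
        static struct fb_ops mydrv_fb_ops = {
                .owner          = THIS_MODULE,
                .fb_open        = adf_fbdev_open,
                .fb_release     = adf_fbdev_release,
                .fb_check_var   = adf_fbdev_check_var,
                .fb_set_par     = adf_fbdev_set_par,
                .fb_blank       = adf_fbdev_blank,
                .fb_pan_display = adf_fbdev_pan_display,
                .fb_mmap        = adf_fbdev_mmap,
        };

This fb_ops would then be handed to adf_fbdev_init() together with the interface, overlay engine, default virtual resolution, and format.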
diff --git a/include/video/adf_format.h b/include/video/adf_format.h
new file mode 100644 (file)
index 0000000..e03182c
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FORMAT_H
+#define _VIDEO_ADF_FORMAT_H
+
+bool adf_format_is_standard(u32 format);
+bool adf_format_is_rgb(u32 format);
+u8 adf_format_num_planes(u32 format);
+u8 adf_format_bpp(u32 format);
+u8 adf_format_plane_cpp(u32 format, int plane);
+u8 adf_format_horz_chroma_subsampling(u32 format);
+u8 adf_format_vert_chroma_subsampling(u32 format);
+
+#endif /* _VIDEO_ADF_FORMAT_H */
diff --git a/include/video/adf_memblock.h b/include/video/adf_memblock.h
new file mode 100644 (file)
index 0000000..6256e0e
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_MEMBLOCK_H_
+#define _VIDEO_ADF_MEMBLOCK_H_
+
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags);
+
+#endif /* _VIDEO_ADF_MEMBLOCK_H_ */
index e1d1d6936f9228ad1580144563c27a8b2d8d2dd1..acb6645ffda54b44c3e8350d9b67319a98929a76 100644 (file)
@@ -392,6 +392,15 @@ config IRQ_TIME_ACCOUNTING
 
 endchoice
 
+config SCHED_WALT
+       bool "Support window based load tracking"
+       depends on SMP
+       help
+         This feature allows the scheduler to maintain a tunable, window-based
+         set of metrics for tasks and runqueues. These metrics can be used to
+         guide task placement as well as task frequency requirements for
+         cpufreq governors.
+
 config BSD_PROCESS_ACCT
        bool "BSD Process Accounting"
        depends on MULTIUSER
@@ -999,6 +1008,23 @@ config CGROUP_CPUACCT
          Provides a simple Resource Controller for monitoring the
          total CPU consumed by the tasks in a cgroup.
 
+config CGROUP_SCHEDTUNE
+       bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+       depends on SCHED_TUNE
+       help
+         This option provides the "schedtune" controller, which improves the
+         flexibility of the task boosting mechanism by introducing support
+         for defining "per task" boost values.
+
+         This new controller:
+         1. allows only a two-layer hierarchy, where the root defines the
+            system-wide boost value and each of its direct children defines a
+            different "class of tasks" to be boosted with a different value
+         2. supports up to 16 different task classes, each of which can be
+            configured with a different boost value
+
+         Say N if unsure.
+
 config PAGE_COUNTER
        bool
 
@@ -1237,6 +1263,43 @@ config SCHED_AUTOGROUP
          desktop applications.  Task group autogeneration is currently based
          upon task session.
 
+config SCHED_TUNE
+       bool "Boosting for CFS tasks (EXPERIMENTAL)"
+       depends on SMP
+       help
+         This option enables the system-wide support for task boosting.
+         When this support is enabled, a new sysctl interface is exposed to
+         userspace via:
+            /proc/sys/kernel/sched_cfs_boost
+         which allows setting a system-wide boost value in the range [0..100].
+
+         The current boosting strategy is implemented in such a way that:
+         - a 0% boost value operates in "standard" mode, scheduling all tasks
+           at the minimum capacity required by their workload demand
+         - a 100% boost value pushes task performance to the maximum,
+           regardless of the incurred energy consumption
+
+         A boost value between these two boundaries biases the
+         power/performance trade-off: the higher the boost value, the more the
+         scheduler is biased toward performance boosting instead of energy
+         efficiency.
+
+         Since this support exposes a single system-wide knob, the specified
+         boost value is applied to all (CFS) tasks in the system.
+
+         If unsure, say N.
+
+config DEFAULT_USE_ENERGY_AWARE
+       bool "Default to enabling the Energy Aware Scheduler feature"
+       default n
+       help
+         This option enables the ENERGY_AWARE scheduling feature by default,
+         since without SCHED_DEBUG set the feature cannot be enabled or
+         disabled via sysctl.
+
+         Say N if unsure.
+
 config SYSFS_DEPRECATED
        bool "Enable deprecated sysfs features to support old userspace tools"
        depends on SYSFS
index 7bc47ee31c369d442676edba32233fbb2ddbb462..243f61de2cbabbff8943c70d2688f54d4cd18c8a 100644 (file)
@@ -3,11 +3,8 @@
 #
 
 obj-y                          := main.o version.o mounts.o
-ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
-else
 obj-$(CONFIG_BLK_DEV_INITRD)   += initramfs.o
-endif
 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
 
 ifneq ($(CONFIG_ARCH_INIT_TASK),y)
@@ -18,6 +15,7 @@ mounts-y                      := do_mounts.o
 mounts-$(CONFIG_BLK_DEV_RAM)   += do_mounts_rd.o
 mounts-$(CONFIG_BLK_DEV_INITRD)        += do_mounts_initrd.o
 mounts-$(CONFIG_BLK_DEV_MD)    += do_mounts_md.o
+mounts-$(CONFIG_BLK_DEV_DM)    += do_mounts_dm.o
 
 # dependencies on generated files need to be listed explicitly
 $(obj)/version.o: include/generated/compile.h
index dea5de95c2dd23771f44e58ada2d9b1dbfa1c5df..1902a1c808319f6e84aa13944ba44ede13a9b02e 100644 (file)
@@ -566,6 +566,7 @@ void __init prepare_namespace(void)
        wait_for_device_probe();
 
        md_run_setup();
+       dm_run_setup();
 
        if (saved_root_name[0]) {
                root_device_name = saved_root_name;
index f5b978a9bb92892a5e876ae3ce1338ad8a896e04..09d22862e8c38d49339a1ea09f01288eed5b60b3 100644 (file)
@@ -74,3 +74,13 @@ void md_run_setup(void);
 static inline void md_run_setup(void) {}
 
 #endif
+
+#ifdef CONFIG_BLK_DEV_DM
+
+void dm_run_setup(void);
+
+#else
+
+static inline void dm_run_setup(void) {}
+
+#endif
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
new file mode 100644 (file)
index 0000000..ecda58d
--- /dev/null
@@ -0,0 +1,426 @@
+/* do_mounts_dm.c
+ * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ *                    All Rights Reserved.
+ * Based on do_mounts_md.c
+ *
+ * This file is released under the GPL.
+ */
+#include <linux/device-mapper.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+
+#include "do_mounts.h"
+#include "../drivers/md/dm.h"
+
+#define DM_MAX_NAME 32
+#define DM_MAX_UUID 129
+#define DM_NO_UUID "none"
+
+#define DM_MSG_PREFIX "init"
+
+/* Separators used for parsing the dm= argument. */
+#define DM_FIELD_SEP ' '
+#define DM_LINE_SEP ','
+
+/*
+ * When the device-mapper and any targets are compiled into the kernel
+ * (not a module), one target may be created and used as the root device at
+ * boot time with the parameters given with the boot line dm=...
+ * The code for that is here.
+ */
+
+struct dm_setup_target {
+       sector_t begin;
+       sector_t length;
+       char *type;
+       char *params;
+       /* simple singly linked list */
+       struct dm_setup_target *next;
+};
+
+static struct {
+       int minor;
+       int ro;
+       char name[DM_MAX_NAME];
+       char uuid[DM_MAX_UUID];
+       char *targets;
+       struct dm_setup_target *target;
+       int target_count;
+} dm_setup_args __initdata;
+
+static __initdata int dm_early_setup;
+
+static size_t __init get_dm_option(char *str, char **next, char sep)
+{
+       size_t len = 0;
+       char *endp = NULL;
+
+       if (!str)
+               return 0;
+
+       endp = strchr(str, sep);
+       if (!endp) {  /* act like strchrnul */
+               len = strlen(str);
+               endp = str + len;
+       } else {
+               len = endp - str;
+       }
+
+       if (endp == str)
+               return 0;
+
+       if (!next)
+               return len;
+
+       if (*endp == 0) {
+               /* Don't advance past the nul. */
+               *next = endp;
+       } else {
+               *next = endp + 1;
+       }
+       return len;
+}
+
+static int __init dm_setup_args_init(void)
+{
+       dm_setup_args.minor = 0;
+       dm_setup_args.ro = 0;
+       dm_setup_args.target = NULL;
+       dm_setup_args.target_count = 0;
+       return 0;
+}
+
+static int __init dm_setup_cleanup(void)
+{
+       struct dm_setup_target *target = dm_setup_args.target;
+       struct dm_setup_target *old_target = NULL;
+       while (target) {
+               kfree(target->type);
+               kfree(target->params);
+               old_target = target;
+               target = target->next;
+               kfree(old_target);
+               dm_setup_args.target_count--;
+       }
+       BUG_ON(dm_setup_args.target_count);
+       return 0;
+}
+
+static char * __init dm_setup_parse_device_args(char *str)
+{
+       char *next = NULL;
+       size_t len = 0;
+
+       /* Grab the logical name of the device to be exported to udev */
+       len = get_dm_option(str, &next, DM_FIELD_SEP);
+       if (!len) {
+               DMERR("failed to parse device name");
+               goto parse_fail;
+       }
+       len = min(len + 1, sizeof(dm_setup_args.name));
+       strlcpy(dm_setup_args.name, str, len);  /* includes nul */
+       str = skip_spaces(next);
+
+       /* Grab the UUID value or "none" */
+       len = get_dm_option(str, &next, DM_FIELD_SEP);
+       if (!len) {
+               DMERR("failed to parse device uuid");
+               goto parse_fail;
+       }
+       len = min(len + 1, sizeof(dm_setup_args.uuid));
+       strlcpy(dm_setup_args.uuid, str, len);
+       str = skip_spaces(next);
+
+       /* Determine if the table/device will be read only or read-write */
+       if (!strncmp("ro,", str, 3)) {
+               dm_setup_args.ro = 1;
+       } else if (!strncmp("rw,", str, 3)) {
+               dm_setup_args.ro = 0;
+       } else {
+               DMERR("failed to parse table mode");
+               goto parse_fail;
+       }
+       str = skip_spaces(str + 3);
+
+       return str;
+
+parse_fail:
+       return NULL;
+}
+
+static void __init dm_substitute_devices(char *str, size_t str_len)
+{
+       char *candidate = str;
+       char *candidate_end = str;
+       char old_char;
+       size_t len = 0;
+       dev_t dev;
+
+       if (str_len < 3)
+               return;
+
+       while (str && *str) {
+               candidate = strchr(str, '/');
+               if (!candidate)
+                       break;
+
+               /* Avoid embedded slashes */
+               if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
+                       str = strchr(candidate, DM_FIELD_SEP);
+                       continue;
+               }
+
+               len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
+               str = skip_spaces(candidate_end);
+               if (len < 3 || len > 37)  /* name_to_dev_t max; maj:min */
+                       continue;
+
+               /* Temporarily terminate with a nul */
+               if (*candidate_end)
+                       candidate_end--;
+               old_char = *candidate_end;
+               *candidate_end = '\0';
+
+               DMDEBUG("converting candidate device '%s' to dev_t", candidate);
+               /* Use the boot-time specific device naming */
+               dev = name_to_dev_t(candidate);
+               *candidate_end = old_char;
+
+               DMDEBUG(" -> %u", dev);
+               /* No suitable replacement found */
+               if (!dev)
+                       continue;
+
+               /* Rewrite the /dev/path as a major:minor */
+               len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
+               if (!len) {
+                       DMERR("error substituting device major/minor.");
+                       break;
+               }
+               candidate += len;
+               /* Pad out with spaces (fixing our nul) */
+               while (candidate < candidate_end)
+                       *(candidate++) = DM_FIELD_SEP;
+       }
+}
+
+static int __init dm_setup_parse_targets(char *str)
+{
+       char *next = NULL;
+       size_t len = 0;
+       struct dm_setup_target **target = NULL;
+
+       /* Targets are defined as per the table format but with a
+        * comma as a newline separator. */
+       target = &dm_setup_args.target;
+       while (str && *str) {
+               *target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
+               if (!*target) {
+                       DMERR("failed to allocate memory for target %d",
+                             dm_setup_args.target_count);
+                       goto parse_fail;
+               }
+               dm_setup_args.target_count++;
+
+               (*target)->begin = simple_strtoull(str, &next, 10);
+               if (!next || *next != DM_FIELD_SEP) {
+                       DMERR("failed to parse starting sector for target %d",
+                             dm_setup_args.target_count - 1);
+                       goto parse_fail;
+               }
+               str = skip_spaces(next + 1);
+
+               (*target)->length = simple_strtoull(str, &next, 10);
+               if (!next || *next != DM_FIELD_SEP) {
+                       DMERR("failed to parse length for target %d",
+                             dm_setup_args.target_count - 1);
+                       goto parse_fail;
+               }
+               str = skip_spaces(next + 1);
+
+               len = get_dm_option(str, &next, DM_FIELD_SEP);
+               if (!len ||
+                   !((*target)->type = kstrndup(str, len, GFP_KERNEL))) {
+                       DMERR("failed to parse type for target %d",
+                             dm_setup_args.target_count - 1);
+                       goto parse_fail;
+               }
+               str = skip_spaces(next);
+
+               len = get_dm_option(str, &next, DM_LINE_SEP);
+               if (!len ||
+                   !((*target)->params = kstrndup(str, len, GFP_KERNEL))) {
+                       DMERR("failed to parse params for target %d",
+                             dm_setup_args.target_count - 1);
+                       goto parse_fail;
+               }
+               str = skip_spaces(next);
+
+               /* Before moving on, walk through the copied target and
+                * attempt to replace all /dev/xxx with the major:minor number.
+                * It may not be possible to resolve them traditionally at
+                * boot-time. */
+               dm_substitute_devices((*target)->params, len);
+
+               target = &((*target)->next);
+       }
+       DMDEBUG("parsed %d targets", dm_setup_args.target_count);
+
+       return 0;
+
+parse_fail:
+       return 1;
+}
+
+/*
+ * Parse the command-line parameters given our kernel, but do not
+ * actually try to invoke the DM device now; that is handled by
+ * dm_setup_drive after the low-level disk drivers have initialised.
+ * dm format is as follows:
+ *  dm="name uuid fmode,[table line 1],[table line 2],..."
+ * May be used with root=/dev/dm-0 as it always uses the first dm minor.
+ */
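For illustration only (not part of the patch), a boot line following the format above might be:

        dm="lroot none ro, 0 4096 linear 98:16 0" root=/dev/dm-0

Here "lroot" is the device name, "none" means no uuid, "ro" marks the table read-only, and the single table line describes a 4096-sector linear target over an assumed 98:16 block device; root=/dev/dm-0 then mounts the resulting first dm minor as the root filesystem.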
+
+static int __init dm_setup(char *str)
+{
+       dm_setup_args_init();
+
+       str = dm_setup_parse_device_args(str);
+       if (!str) {
+               DMDEBUG("str is NULL");
+               goto parse_fail;
+       }
+
+       /* Target parsing is delayed until we have dynamic memory */
+       dm_setup_args.targets = str;
+
+       printk(KERN_INFO "dm: will configure '%s' on dm-%d\n",
+              dm_setup_args.name, dm_setup_args.minor);
+
+       dm_early_setup = 1;
+       return 1;
+
+parse_fail:
+       printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n");
+       return 0;
+}
+
+
+static void __init dm_setup_drive(void)
+{
+       struct mapped_device *md = NULL;
+       struct dm_table *table = NULL;
+       struct dm_setup_target *target;
+       char *uuid = dm_setup_args.uuid;
+       fmode_t fmode = FMODE_READ;
+
+       /* Finish parsing the targets. */
+       if (dm_setup_parse_targets(dm_setup_args.targets))
+               goto parse_fail;
+
+       if (dm_create(dm_setup_args.minor, &md)) {
+               DMDEBUG("failed to create the device");
+               goto dm_create_fail;
+       }
+       DMDEBUG("created device '%s'", dm_device_name(md));
+
+       /* In addition to flagging the table below, the disk must be
+        * set explicitly ro/rw. */
+       set_disk_ro(dm_disk(md), dm_setup_args.ro);
+
+       if (!dm_setup_args.ro)
+               fmode |= FMODE_WRITE;
+       if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
+               DMDEBUG("failed to create the table");
+               goto dm_table_create_fail;
+       }
+
+       dm_lock_md_type(md);
+       target = dm_setup_args.target;
+       while (target) {
+               DMINFO("adding target '%llu %llu %s %s'",
+                      (unsigned long long) target->begin,
+                      (unsigned long long) target->length, target->type,
+                      target->params);
+               if (dm_table_add_target(table, target->type, target->begin,
+                                       target->length, target->params)) {
+                       DMDEBUG("failed to add the target to the table");
+                       goto add_target_fail;
+               }
+               target = target->next;
+       }
+
+       if (dm_table_complete(table)) {
+               DMDEBUG("failed to complete the table");
+               goto table_complete_fail;
+       }
+
+       if (dm_get_md_type(md) == DM_TYPE_NONE) {
+               dm_set_md_type(md, dm_table_get_type(table));
+               if (dm_setup_md_queue(md)) {
+                       DMWARN("unable to set up device queue for new table.");
+                       goto setup_md_queue_fail;
+               }
+       } else if (dm_get_md_type(md) != dm_table_get_type(table)) {
+               DMWARN("can't change device type after initial table load.");
+               goto setup_md_queue_fail;
+       }
+
+       /* Suspend the device so that we can bind it to the table. */
+       if (dm_suspend(md, 0)) {
+               DMDEBUG("failed to suspend the device pre-bind");
+               goto suspend_fail;
+       }
+
+       /* Bind the table to the device. This is the only way to associate
+        * md->map with the table and set the disk capacity directly. */
+       if (dm_swap_table(md, table)) {  /* should return NULL. */
+               DMDEBUG("failed to bind the device to the table");
+               goto table_bind_fail;
+       }
+
+       /* Finally, resume and the device should be ready. */
+       if (dm_resume(md)) {
+               DMDEBUG("failed to resume the device");
+               goto resume_fail;
+       }
+
+       /* Export the dm device via the ioctl interface */
+       if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
+               uuid = NULL;
+       if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
+               DMDEBUG("failed to export device with given name and uuid");
+               goto export_fail;
+       }
+       printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);
+
+       dm_unlock_md_type(md);
+       dm_setup_cleanup();
+       return;
+
+export_fail:
+resume_fail:
+table_bind_fail:
+suspend_fail:
+setup_md_queue_fail:
+table_complete_fail:
+add_target_fail:
+       dm_unlock_md_type(md);
+dm_table_create_fail:
+       dm_put(md);
+dm_create_fail:
+       dm_setup_cleanup();
+parse_fail:
+       printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
+              dm_setup_args.minor, dm_setup_args.name);
+}
+
+__setup("dm=", dm_setup);
+
+void __init dm_run_setup(void)
+{
+       if (!dm_early_setup)
+               return;
+       printk(KERN_INFO "dm: attempting early device configuration.\n");
+       dm_setup_drive();
+}
index b32ad7d97ac94f52a0c50acd2a904e8a0c2f888d..f8ce812ba43e9356fdf35393d6c6b423078514ba 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/dirent.h>
 #include <linux/syscalls.h>
 #include <linux/utime.h>
+#include <linux/initramfs.h>
 
 static ssize_t __init xwrite(int fd, const char *p, size_t count)
 {
@@ -605,9 +606,25 @@ static void __init clean_rootfs(void)
 }
 #endif
 
+static int __initdata do_skip_initramfs;
+
+static int __init skip_initramfs_param(char *str)
+{
+       if (*str)
+               return 0;
+       do_skip_initramfs = 1;
+       return 1;
+}
+__setup("skip_initramfs", skip_initramfs_param);
+
 static int __init populate_rootfs(void)
 {
-       char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+       char *err;
+
+       if (do_skip_initramfs)
+               return default_rootfs();
+
+       err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
        if (err)
                panic("%s", err); /* Failed to decompress INTERNAL initramfs */
        if (initrd_start) {
index 267739d851791b0a3e4526a3be5fb4944d8b507e..bcc8bcb053eeb05dc095780fcbc276fa2f311bd4 100644 (file)
 #include <linux/stat.h>
 #include <linux/kdev_t.h>
 #include <linux/syscalls.h>
+#include <linux/kconfig.h>
+#include <linux/initramfs.h>
 
 /*
  * Create a simple rootfs that is similar to the default initramfs
  */
-static int __init default_rootfs(void)
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+static
+#endif
+int __init default_rootfs(void)
 {
        int err;
 
@@ -49,4 +54,6 @@ out:
        printk(KERN_WARNING "Failed to create a rootfs\n");
        return err;
 }
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
 rootfs_initcall(default_rootfs);
+#endif
index 5ffcbd354a520b88781ed2d66c7839a7aaa7f86d..34f690b9213abc7f42088329f1005e0be468e111 100644 (file)
@@ -870,6 +870,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                return err;
                }
                if (s.mask & AUDIT_STATUS_PID) {
+                       /* NOTE: we are using task_tgid_vnr() below because
+                        *       the s.pid value is relative to the namespace
+                        *       of the caller; at present this doesn't matter
+                        *       much since you can really only run auditd
+                        *       from the initial pid namespace, but something
+                        *       to keep in mind if this changes */
                        int new_pid = s.pid;
 
                        if ((!new_pid) && (task_tgid_vnr(current) != audit_pid))
@@ -1896,7 +1902,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
                         " euid=%u suid=%u fsuid=%u"
                         " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
                         task_ppid_nr(tsk),
-                        task_pid_nr(tsk),
+                        task_tgid_nr(tsk),
                         from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
                         from_kuid(&init_user_ns, cred->uid),
                         from_kgid(&init_user_ns, cred->gid),
index 48f45987dc6c7782555103e58e42e6f4743799e4..63f0e495f5176655f16bbd998ed70ab600cbc64c 100644 (file)
@@ -458,7 +458,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 
                switch (f->type) {
                case AUDIT_PID:
-                       pid = task_pid_nr(tsk);
+                       pid = task_tgid_nr(tsk);
                        result = audit_comparator(pid, f->op, f->val);
                        break;
                case AUDIT_PPID:
@@ -1987,7 +1987,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
        if (!ab)
                return;
-       audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+       audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
        audit_log_task_context(ab);
        audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
                         oldloginuid, loginuid, oldsessionid, sessionid, !rc);
@@ -2212,7 +2212,7 @@ void __audit_ptrace(struct task_struct *t)
 {
        struct audit_context *context = current->audit_context;
 
-       context->target_pid = task_pid_nr(t);
+       context->target_pid = task_tgid_nr(t);
        context->target_auid = audit_get_loginuid(t);
        context->target_uid = task_uid(t);
        context->target_sessionid = audit_get_sessionid(t);
@@ -2237,7 +2237,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 
        if (audit_pid && t->tgid == audit_pid) {
                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-                       audit_sig_pid = task_pid_nr(tsk);
+                       audit_sig_pid = task_tgid_nr(tsk);
                        if (uid_valid(tsk->loginuid))
                                audit_sig_uid = tsk->loginuid;
                        else
@@ -2337,7 +2337,7 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
 void __audit_log_capset(const struct cred *new, const struct cred *old)
 {
        struct audit_context *context = current->audit_context;
-       context->capset.pid = task_pid_nr(current);
+       context->capset.pid = task_tgid_nr(current);
        context->capset.cap.effective   = new->cap_effective;
        context->capset.cap.inheritable = new->cap_effective;
        context->capset.cap.permitted   = new->cap_permitted;
@@ -2369,7 +2369,7 @@ static void audit_log_task(struct audit_buffer *ab)
                         from_kgid(&init_user_ns, gid),
                         sessionid);
        audit_log_task_context(ab);
-       audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+       audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
        audit_log_untrustedstring(ab, get_task_comm(comm, current));
        audit_log_d_path_exe(ab, current->mm);
 }
index b5946676f84ef0013f07bcce35b10534dd23de04..03a1b3f754d6aa3fc3be4aa3226beda3973bc3f6 100644 (file)
@@ -2686,7 +2686,8 @@ static int cgroup_procs_write_permission(struct task_struct *task,
         */
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
-           !uid_eq(cred->euid, tcred->suid))
+           !uid_eq(cred->euid, tcred->suid) &&
+           !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
                ret = -EACCES;
 
        if (!ret && cgroup_on_dfl(dst_cgrp)) {
@@ -5326,6 +5327,12 @@ int __init cgroup_init(void)
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
+       /*
+        * The latency of the synchronize_sched() is too high for cgroups,
+        * avoid it at the cost of forcing all readers into the slow path.
+        */
+       rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
+
        mutex_lock(&cgroup_mutex);
 
        /* Add init_css_set to the hash table */
index 85ff5e26e23b45b34201120c758082599f995b7e..c8a1751be2244233f7ebeea0db601593b3e468a3 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/irq.h>
 #include <trace/events/power.h>
 
+#include <trace/events/sched.h>
+
 #include "smpboot.h"
 
 #ifdef CONFIG_SMP
@@ -183,10 +185,17 @@ void cpu_hotplug_disable(void)
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
+static void __cpu_hotplug_enable(void)
+{
+       if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+               return;
+       cpu_hotplug_disabled--;
+}
+
 void cpu_hotplug_enable(void)
 {
        cpu_maps_update_begin();
-       WARN_ON(--cpu_hotplug_disabled < 0);
+       __cpu_hotplug_enable();
        cpu_maps_update_done();
 }
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
@@ -425,6 +434,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 
 out_release:
        cpu_hotplug_done();
+       trace_sched_cpu_hotplug(cpu, err, 0);
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
@@ -530,6 +540,7 @@ out_notify:
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
 out:
        cpu_hotplug_done();
+       trace_sched_cpu_hotplug(cpu, ret, 1);
 
        return ret;
 }
@@ -623,10 +634,11 @@ void __weak arch_enable_nonboot_cpus_end(void)
 void enable_nonboot_cpus(void)
 {
        int cpu, error;
+       struct device *cpu_device;
 
        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
-       WARN_ON(--cpu_hotplug_disabled < 0);
+       __cpu_hotplug_enable();
        if (cpumask_empty(frozen_cpus))
                goto out;
 
@@ -640,6 +652,12 @@ void enable_nonboot_cpus(void)
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
+                       cpu_device = get_cpu_device(cpu);
+                       if (!cpu_device)
+                               pr_err("%s: failed to get cpu%d device\n",
+                                      __func__, cpu);
+                       else
+                               kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
@@ -827,3 +845,23 @@ void init_cpu_online(const struct cpumask *src)
 {
        cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+       atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+       atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
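
The idle notifier chain added at the bottom of kernel/cpu.c gives drivers a hook around idle entry and exit on architectures whose idle loop calls idle_notifier_call_chain(). A driver-side usage sketch, assuming the declarations are exposed via <linux/cpu.h> as in other Android kernels; the callback and module below are hypothetical:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int my_idle_notify(struct notifier_block *nb, unsigned long event,
                          void *data)
{
        /* 'event' is whatever value the idle loop passes to
         * idle_notifier_call_chain(), e.g. idle entry vs. exit. */
        return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
        .notifier_call = my_idle_notify,
};

static int __init my_driver_init(void)
{
        idle_notifier_register(&my_idle_nb);
        return 0;
}

static void __exit my_driver_exit(void)
{
        idle_notifier_unregister(&my_idle_nb);
}
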
index b271353d5202ba4fa2e8f9d469e1efd4307cf609..3f9db31c5d043287b7e566fdaace6446ee46e6a1 100644 (file)
@@ -98,6 +98,7 @@ struct cpuset {
 
        /* user-configured CPUs and Memory Nodes allow to tasks */
        cpumask_var_t cpus_allowed;
+       cpumask_var_t cpus_requested;
        nodemask_t mems_allowed;
 
        /* effective CPUs and Memory Nodes allow to tasks */
@@ -397,7 +398,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-       return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+       return  cpumask_subset(p->cpus_requested, q->cpus_requested) &&
                nodes_subset(p->mems_allowed, q->mems_allowed) &&
                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -497,7 +498,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
        cpuset_for_each_child(c, css, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
-                   cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+                   cpumask_intersects(trial->cpus_requested, c->cpus_requested))
                        goto out;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
@@ -956,17 +957,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (!*buf) {
                cpumask_clear(trialcs->cpus_allowed);
        } else {
-               retval = cpulist_parse(buf, trialcs->cpus_allowed);
+               retval = cpulist_parse(buf, trialcs->cpus_requested);
                if (retval < 0)
                        return retval;
 
-               if (!cpumask_subset(trialcs->cpus_allowed,
-                                   top_cpuset.cpus_allowed))
+               if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
                        return -EINVAL;
+
+               cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
        }
 
        /* Nothing to do if the cpus didn't change */
-       if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+       if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
                return 0;
 
        retval = validate_change(cs, trialcs);
@@ -975,6 +977,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
        spin_lock_irq(&callback_lock);
        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+       cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->cpus_allowed as a temp variable */
@@ -1765,7 +1768,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 
        switch (type) {
        case FILE_CPULIST:
-               seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+               seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
                break;
        case FILE_MEMLIST:
                seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1954,11 +1957,14 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
                return ERR_PTR(-ENOMEM);
        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
                goto free_cs;
+       if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+               goto free_allowed;
        if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-               goto free_cpus;
+               goto free_requested;
 
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
+       cpumask_clear(cs->cpus_requested);
        nodes_clear(cs->mems_allowed);
        cpumask_clear(cs->effective_cpus);
        nodes_clear(cs->effective_mems);
@@ -1967,7 +1973,9 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 
        return &cs->css;
 
-free_cpus:
+free_requested:
+       free_cpumask_var(cs->cpus_requested);
+free_allowed:
        free_cpumask_var(cs->cpus_allowed);
 free_cs:
        kfree(cs);
@@ -2030,6 +2038,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
        cs->mems_allowed = parent->mems_allowed;
        cs->effective_mems = parent->mems_allowed;
        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+       cpumask_copy(cs->cpus_requested, parent->cpus_requested);
        cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
        spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2064,6 +2073,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 
        free_cpumask_var(cs->effective_cpus);
        free_cpumask_var(cs->cpus_allowed);
+       free_cpumask_var(cs->cpus_requested);
        kfree(cs);
 }
 
@@ -2128,8 +2138,11 @@ int __init cpuset_init(void)
                BUG();
        if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
                BUG();
+       if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+               BUG();
 
        cpumask_setall(top_cpuset.cpus_allowed);
+       cpumask_setall(top_cpuset.cpus_requested);
        nodes_setall(top_cpuset.mems_allowed);
        cpumask_setall(top_cpuset.effective_cpus);
        nodes_setall(top_cpuset.effective_mems);
@@ -2263,7 +2276,7 @@ retry:
                goto retry;
        }
 
-       cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+       cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
        nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
        cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
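
The cpuset change splits the user's request from what is currently enforceable: cpus_requested keeps exactly the mask written to cpuset.cpus, while cpus_allowed only ever holds the active subset, so CPUs that disappear over hotplug and later return can be handed back to the cpuset. A condensed sketch of that relationship (illustrative only; not a helper in the patch):

#include <linux/cpumask.h>

/*
 * cpus_requested: exactly what userspace wrote to cpuset.cpus.
 * cpus_allowed:   the currently usable subset of that request.
 */
static void refresh_allowed(struct cpuset *cs)
{
        cpumask_and(cs->cpus_allowed, cs->cpus_requested, cpu_active_mask);
}
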
index fc1ef736253c79954686d018a2deca4c86300fa6..0b891286a150c8535454a0be774180016c7a5536 100644 (file)
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
        int i;
        int diag, dtab_count;
        int key;
-
+       static int last_crlf;
 
        diag = kdbgetintenv("DTABCOUNT", &dtab_count);
        if (diag)
@@ -237,6 +237,9 @@ poll_again:
                return buffer;
        if (key != 9)
                tab = 0;
+       if (key != 10 && key != 13)
+               last_crlf = 0;
+
        switch (key) {
        case 8: /* backspace */
                if (cp > buffer) {
@@ -254,7 +257,12 @@ poll_again:
                        *cp = tmp;
                }
                break;
-       case 13: /* enter */
+       case 10: /* new line */
+       case 13: /* carriage return */
+               /* handle \n after \r */
+               if (last_crlf && last_crlf != key)
+                       break;
+               last_crlf = key;
                *lastchar++ = '\n';
                *lastchar++ = '\0';
                if (!KDB_STATE(KGDB_TRANS)) {
index b4998fe563dc31068bca3f620393e002228f6667..f4fdaff76f6d90a77274e37ac919baede07dcf4e 100644 (file)
@@ -175,8 +175,13 @@ static struct srcu_struct pmus_srcu;
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
+#else
 int sysctl_perf_event_paranoid __read_mostly = 1;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -8415,6 +8420,9 @@ SYSCALL_DEFINE5(perf_event_open,
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
 
+       if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
        err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;
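
perf_paranoid_any() is not visible in this hunk; given the documented meaning of level 3, it presumably reduces to a comparison against the new maximum level, along the lines of the sketch below (an assumption about the accompanying header change, not part of this hunk):

#include <linux/perf_event.h>

static inline bool perf_paranoid_any(void)
{
        /* Level 3: deny perf_event_open() entirely to unprivileged tasks. */
        return sysctl_perf_event_paranoid > 2;
}
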
index ffba5df4abd54cd2dc146a09fa152ed84502e213..62c4bd4abd3a2a99aac0d4cf6fde311770cf0a0d 100644 (file)
@@ -54,6 +54,8 @@
 #include <linux/writeback.h>
 #include <linux/shm.h>
 
+#include "sched/tune.h"
+
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/pgtable.h>
@@ -699,6 +701,9 @@ void do_exit(long code)
        }
 
        exit_signals(tsk);  /* sets PF_EXITING */
+
+       schedtune_exit_task(tsk);
+
        /*
         * tsk->flags are checked in the futex code to protect against
         * an exiting task cleaning up the robust pi futexes.
index 7161ebe67cbb5eae1b2c4dfaadad2df2e7e04404..18a5cb17035a1b23c9df9323ee05da774ad6e60b 100644 (file)
@@ -823,7 +823,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 
        mm = get_task_mm(task);
        if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, mode)) {
+                       !ptrace_may_access(task, mode) &&
+                       !capable(CAP_SYS_RESOURCE)) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
index f231e0bb311ce0827d281d34f737a3a06405c072..ce182599cf2e98b51831adbf5dca6ce545df0d7f 100644 (file)
 #include <linux/sched.h>
 #include <linux/errno.h>
 
-int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
+int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                        const char *name, struct lock_class_key *rwsem_key)
 {
-       brw->fast_read_ctr = alloc_percpu(int);
-       if (unlikely(!brw->fast_read_ctr))
+       sem->read_count = alloc_percpu(int);
+       if (unlikely(!sem->read_count))
                return -ENOMEM;
 
        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
-       __init_rwsem(&brw->rw_sem, name, rwsem_key);
-       rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
-       atomic_set(&brw->slow_read_ctr, 0);
-       init_waitqueue_head(&brw->write_waitq);
+       rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+       __init_rwsem(&sem->rw_sem, name, rwsem_key);
+       init_waitqueue_head(&sem->writer);
+       sem->readers_block = 0;
        return 0;
 }
 EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
 
-void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
+void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 {
        /*
         * XXX: temporary kludge. The error path in alloc_super()
         * assumes that percpu_free_rwsem() is safe after kzalloc().
         */
-       if (!brw->fast_read_ctr)
+       if (!sem->read_count)
                return;
 
-       rcu_sync_dtor(&brw->rss);
-       free_percpu(brw->fast_read_ctr);
-       brw->fast_read_ctr = NULL; /* catch use after free bugs */
+       rcu_sync_dtor(&sem->rss);
+       free_percpu(sem->read_count);
+       sem->read_count = NULL; /* catch use after free bugs */
 }
+EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-/*
- * This is the fast-path for down_read/up_read. If it succeeds we rely
- * on the barriers provided by rcu_sync_enter/exit; see the comments in
- * percpu_down_write() and percpu_up_write().
- *
- * If this helper fails the callers rely on the normal rw_semaphore and
- * atomic_dec_and_test(), so in this case we have the necessary barriers.
- */
-static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
+int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 {
-       bool success;
+       /*
+        * Due to having preemption disabled the decrement happens on
+        * the same CPU as the increment, avoiding the
+        * increment-on-one-CPU-and-decrement-on-another problem.
+        *
+        * If the reader misses the writer's assignment of readers_block, then
+        * the writer is guaranteed to see the reader's increment.
+        *
+        * Conversely, any readers that increment their sem->read_count after
+        * the writer looks are guaranteed to see the readers_block value,
+        * which in turn means that they are guaranteed to immediately
+        * decrement their sem->read_count, so that it doesn't matter that the
+        * writer missed them.
+        */
 
-       preempt_disable();
-       success = rcu_sync_is_idle(&brw->rss);
-       if (likely(success))
-               __this_cpu_add(*brw->fast_read_ctr, val);
-       preempt_enable();
+       smp_mb(); /* A matches D */
 
-       return success;
-}
+       /*
+        * If !readers_block the critical section starts here, matched by the
+        * release in percpu_up_write().
+        */
+       if (likely(!smp_load_acquire(&sem->readers_block)))
+               return 1;
 
-/*
- * Like the normal down_read() this is not recursive, the writer can
- * come after the first percpu_down_read() and create the deadlock.
- *
- * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
- * percpu_up_read() does rwsem_release(). This pairs with the usage
- * of ->rw_sem in percpu_down/up_write().
- */
-void percpu_down_read(struct percpu_rw_semaphore *brw)
-{
-       might_sleep();
-       rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+       /*
+        * Per the above comment, we still have preemption disabled and
+        * will thus decrement on the same CPU as we incremented.
+        */
+       __percpu_up_read(sem);
 
-       if (likely(update_fast_ctr(brw, +1)))
-               return;
+       if (try)
+               return 0;
 
-       /* Avoid rwsem_acquire_read() and rwsem_release() */
-       __down_read(&brw->rw_sem);
-       atomic_inc(&brw->slow_read_ctr);
-       __up_read(&brw->rw_sem);
-}
-EXPORT_SYMBOL_GPL(percpu_down_read);
+       /*
+        * We either call schedule() in the wait, or we'll fall through
+        * and reschedule on the preempt_enable() in percpu_down_read().
+        */
+       preempt_enable_no_resched();
 
-int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
-{
-       if (unlikely(!update_fast_ctr(brw, +1))) {
-               if (!__down_read_trylock(&brw->rw_sem))
-                       return 0;
-               atomic_inc(&brw->slow_read_ctr);
-               __up_read(&brw->rw_sem);
-       }
-
-       rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
+       /*
+        * Avoid lockdep for the down/up_read(); we already have them.
+        */
+       __down_read(&sem->rw_sem);
+       this_cpu_inc(*sem->read_count);
+       __up_read(&sem->rw_sem);
+
+       preempt_disable();
        return 1;
 }
+EXPORT_SYMBOL_GPL(__percpu_down_read);
 
-void percpu_up_read(struct percpu_rw_semaphore *brw)
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-       rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);
-
-       if (likely(update_fast_ctr(brw, -1)))
-               return;
+       smp_mb(); /* B matches C */
+       /*
+        * In other words, if they see our decrement (presumably to aggregate
+        * zero, as that is the only time it matters) they will also see our
+        * critical section.
+        */
+       __this_cpu_dec(*sem->read_count);
 
-       /* false-positive is possible but harmless */
-       if (atomic_dec_and_test(&brw->slow_read_ctr))
-               wake_up_all(&brw->write_waitq);
+       /* Prod writer to recheck readers_active */
+       wake_up(&sem->writer);
 }
-EXPORT_SYMBOL_GPL(percpu_up_read);
+EXPORT_SYMBOL_GPL(__percpu_up_read);
+
+#define per_cpu_sum(var)                                               \
+({                                                                     \
+       typeof(var) __sum = 0;                                          \
+       int cpu;                                                        \
+       compiletime_assert_atomic_type(__sum);                          \
+       for_each_possible_cpu(cpu)                                      \
+               __sum += per_cpu(var, cpu);                             \
+       __sum;                                                          \
+})
 
-static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
+/*
+ * Return true if the modular sum of the sem->read_count per-CPU variable is
+ * zero.  If this sum is zero, then it is stable due to the fact that if any
+ * newly arriving readers increment a given counter, they will immediately
+ * decrement that same counter.
+ */
+static bool readers_active_check(struct percpu_rw_semaphore *sem)
 {
-       unsigned int sum = 0;
-       int cpu;
+       if (per_cpu_sum(*sem->read_count) != 0)
+               return false;
+
+       /*
+        * If we observed the decrement, ensure we see the entire critical
+        * section.
+        */
 
-       for_each_possible_cpu(cpu) {
-               sum += per_cpu(*brw->fast_read_ctr, cpu);
-               per_cpu(*brw->fast_read_ctr, cpu) = 0;
-       }
+       smp_mb(); /* C matches B */
 
-       return sum;
+       return true;
 }
 
-void percpu_down_write(struct percpu_rw_semaphore *brw)
+void percpu_down_write(struct percpu_rw_semaphore *sem)
 {
+       /* Notify readers to take the slow path. */
+       rcu_sync_enter(&sem->rss);
+
+       down_write(&sem->rw_sem);
+
        /*
-        * Make rcu_sync_is_idle() == F and thus disable the fast-path in
-        * percpu_down_read() and percpu_up_read(), and wait for gp pass.
-        *
-        * The latter synchronises us with the preceding readers which used
-        * the fast-past, so we can not miss the result of __this_cpu_add()
-        * or anything else inside their criticial sections.
+        * Notify new readers to block; up until now, and thus throughout the
+        * longish rcu_sync_enter() above, new readers could still come in.
         */
-       rcu_sync_enter(&brw->rss);
+       WRITE_ONCE(sem->readers_block, 1);
 
-       /* exclude other writers, and block the new readers completely */
-       down_write(&brw->rw_sem);
+       smp_mb(); /* D matches A */
 
-       /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
-       atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);
+       /*
+        * If they don't see our write to readers_block, then we are
+        * guaranteed to see their sem->read_count increment, and therefore
+        * will wait for them.
+        */
 
-       /* wait for all readers to complete their percpu_up_read() */
-       wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
+       /* Wait for all now active readers to complete. */
+       wait_event(sem->writer, readers_active_check(sem));
 }
 EXPORT_SYMBOL_GPL(percpu_down_write);
 
-void percpu_up_write(struct percpu_rw_semaphore *brw)
+void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
-       /* release the lock, but the readers can't use the fast-path */
-       up_write(&brw->rw_sem);
        /*
-        * Enable the fast-path in percpu_down_read() and percpu_up_read()
-        * but only after another gp pass; this adds the necessary barrier
-        * to ensure the reader can't miss the changes done by us.
+        * Signal the writer is done, no fast path yet.
+        *
+        * One reason that we cannot just immediately flip to readers_fast is
+        * that new readers might fail to see the results of this writer's
+        * critical section.
+        *
+        * Therefore we force it through the slow path which guarantees an
+        * acquire and thereby guarantees the critical section's consistency.
+        */
+       smp_store_release(&sem->readers_block, 0);
+
+       /*
+        * Release the write lock, this will allow readers back in the game.
+        */
+       up_write(&sem->rw_sem);
+
+       /*
+        * Once this completes (at least one RCU-sched grace period hence) the
+        * reader fast path will be available again. Safe to use outside the
+        * exclusive write lock because it's counting.
         */
-       rcu_sync_exit(&brw->rss);
+       rcu_sync_exit(&sem->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
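
After this rewrite the reader-side cost in the common case is a preempt-disable plus a per-CPU counter update; readers only fall back to the underlying rw_sem while a writer has readers_block set. A usage sketch with a hypothetical lock (the API calls are the real ones; the surrounding functions are illustrative):

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;       /* hypothetical lock */

static int my_subsys_init(void)
{
        return percpu_init_rwsem(&my_sem);
}

static void my_reader(void)
{
        percpu_down_read(&my_sem);      /* fast path: per-CPU increment */
        /* ... read-side critical section ... */
        percpu_up_read(&my_sem);        /* fast path: per-CPU decrement */
}

static void my_writer(void)
{
        percpu_down_write(&my_sem);     /* rcu_sync_enter() + block new readers */
        /* ... exclusive section ... */
        percpu_up_write(&my_sem);       /* readers regain the fast path after a GP */
}
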
index 02e8dfaa1ce2d2085bd85e165ad6f25b9697a11a..6d6f63be1f9b487a6c19c3d5210c6ec703a19216 100644 (file)
@@ -1,6 +1,7 @@
 config SUSPEND
        bool "Suspend to RAM and standby"
        depends on ARCH_SUSPEND_POSSIBLE
+       select RTC_LIB
        default y
        ---help---
          Allow the system to enter sleep states in which main memory is
@@ -28,6 +29,15 @@ config SUSPEND_SKIP_SYNC
          of suspend, or they are content with invoking sync() from
          user-space before invoking suspend.  Say Y if that's your case.
 
+config WAKELOCK
+       bool "Android's method of preventing suspend"
+       default y
+       ---help---
+         This allows applications to prevent the CPU from suspending while
+         they need it.
+
+         Say Y if you are running an android userspace.
+
 config HIBERNATE_CALLBACKS
        bool
 
index cb880a14cc396eec6af959007011fd6830b14baa..22eb9ed879ade88b5f427728b07e41742aabb486 100644 (file)
@@ -12,3 +12,5 @@ obj-$(CONFIG_PM_AUTOSLEEP)    += autosleep.o
 obj-$(CONFIG_PM_WAKELOCKS)     += wakelock.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)      += poweroff.o
+
+obj-$(CONFIG_SUSPEND)  += wakeup_reason.o
index 564f786df4701a82a8de89821304cfb0f0b7a337..e7f1f736a5b6344f7bc0aef53b71e8ebf9caf034 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 /* 
  * Timeout for stopping processes
@@ -35,6 +36,9 @@ static int try_to_freeze_tasks(bool user_only)
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;
+#ifdef CONFIG_PM_SLEEP
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+#endif
 
        do_gettimeofday(&start);
 
@@ -64,6 +68,11 @@ static int try_to_freeze_tasks(bool user_only)
                        break;
 
                if (pm_wakeup_pending()) {
+#ifdef CONFIG_PM_SLEEP
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
+#endif
                        wakeup = true;
                        break;
                }
@@ -83,15 +92,17 @@ static int try_to_freeze_tasks(bool user_only)
        do_div(elapsed_msecs64, NSEC_PER_MSEC);
        elapsed_msecs = elapsed_msecs64;
 
-       if (todo) {
+       if (wakeup) {
                pr_cont("\n");
-               pr_err("Freezing of tasks %s after %d.%03d seconds "
-                      "(%d tasks refusing to freeze, wq_busy=%d):\n",
-                      wakeup ? "aborted" : "failed",
+               pr_err("Freezing of tasks aborted after %d.%03d seconds\n",
+                      elapsed_msecs / 1000, elapsed_msecs % 1000);
+       } else if (todo) {
+               pr_cont("\n");
+               pr_err("Freezing of tasks failed after %d.%03d seconds"
+                      " (%d tasks refusing to freeze, wq_busy=%d):\n",
                       elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);
 
-               if (!wakeup) {
                        read_lock(&tasklist_lock);
                        for_each_process_thread(g, p) {
                                if (p != current && !freezer_should_skip(p)
@@ -99,7 +110,6 @@ static int try_to_freeze_tasks(bool user_only)
                                        sched_show_task(p);
                        }
                        read_unlock(&tasklist_lock);
-               }
        } else {
                pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
                        elapsed_msecs % 1000);
index f9fe133c13e24de8e91f4ea81abebe6c58a3fd23..024411816ccf2c5110247ce9be5817f1fdc9deed 100644 (file)
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
+#include <linux/rtc.h>
 #include <trace/events/power.h>
 #include <linux/compiler.h>
 #include <linux/moduleparam.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -312,7 +314,8 @@ void __weak arch_suspend_enable_irqs(void)
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-       int error;
+       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+       int error, last_dev;
 
        error = platform_suspend_prepare(state);
        if (error)
@@ -320,7 +323,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
        error = dpm_suspend_late(PMSG_SUSPEND);
        if (error) {
+               last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+               last_dev %= REC_FAILED_NUM;
                printk(KERN_ERR "PM: late suspend of devices failed\n");
+               log_suspend_abort_reason("%s device failed to power down",
+                       suspend_stats.failed_devs[last_dev]);
                goto Platform_finish;
        }
        error = platform_suspend_prepare_late(state);
@@ -329,7 +336,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
        error = dpm_suspend_noirq(PMSG_SUSPEND);
        if (error) {
+               last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+               last_dev %= REC_FAILED_NUM;
                printk(KERN_ERR "PM: noirq suspend of devices failed\n");
+               log_suspend_abort_reason("noirq suspend of %s device failed",
+                       suspend_stats.failed_devs[last_dev]);
                goto Platform_early_resume;
        }
        error = platform_suspend_prepare_noirq(state);
@@ -353,8 +364,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
        }
 
        error = disable_nonboot_cpus();
-       if (error || suspend_test(TEST_CPUS))
+       if (error || suspend_test(TEST_CPUS)) {
+               log_suspend_abort_reason("Disabling non-boot cpus failed");
                goto Enable_cpus;
+       }
 
        arch_suspend_disable_irqs();
        BUG_ON(!irqs_disabled());
@@ -370,6 +383,9 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
                                state, false);
                        events_check_enabled = false;
                } else if (*wakeup) {
+                       pm_get_active_wakeup_sources(suspend_abort,
+                               MAX_SUSPEND_ABORT_LEN);
+                       log_suspend_abort_reason(suspend_abort);
                        error = -EBUSY;
                }
                syscore_resume();
@@ -417,6 +433,7 @@ int suspend_devices_and_enter(suspend_state_t state)
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
+               log_suspend_abort_reason("Some devices failed to suspend, or early wake event detected");
                goto Recover_platform;
        }
        suspend_test_finish("suspend devices");
@@ -518,6 +535,18 @@ static int enter_state(suspend_state_t state)
        return error;
 }
 
+static void pm_suspend_marker(char *annotation)
+{
+       struct timespec ts;
+       struct rtc_time tm;
+
+       getnstimeofday(&ts);
+       rtc_time_to_tm(ts.tv_sec, &tm);
+       pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+               annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+               tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -532,6 +561,7 @@ int pm_suspend(suspend_state_t state)
        if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
                return -EINVAL;
 
+       pm_suspend_marker("entry");
        error = enter_state(state);
        if (error) {
                suspend_stats.fail++;
@@ -539,6 +569,7 @@ int pm_suspend(suspend_state_t state)
        } else {
                suspend_stats.success++;
        }
+       pm_suspend_marker("exit");
        return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644 (file)
index 0000000..252611f
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+
+#define MAX_WAKEUP_REASON_IRQS 32
+static int irq_list[MAX_WAKEUP_REASON_IRQS];
+static int irqcount;
+static bool suspend_abort;
+static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+static struct kobject *wakeup_reason;
+static DEFINE_SPINLOCK(resume_reason_lock);
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+               char *buf)
+{
+       int irq_no, buf_offset = 0;
+       struct irq_desc *desc;
+       spin_lock(&resume_reason_lock);
+       if (suspend_abort) {
+               buf_offset = sprintf(buf, "Abort: %s", abort_reason);
+       } else {
+               for (irq_no = 0; irq_no < irqcount; irq_no++) {
+                       desc = irq_to_desc(irq_list[irq_no]);
+                       if (desc && desc->action && desc->action->name)
+                               buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+                                               irq_list[irq_no], desc->action->name);
+                       else
+                               buf_offset += sprintf(buf + buf_offset, "%d\n",
+                                               irq_list[irq_no]);
+               }
+       }
+       spin_unlock(&resume_reason_lock);
+       return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+                       struct kobj_attribute *attr, char *buf)
+{
+       struct timespec sleep_time;
+       struct timespec total_time;
+       struct timespec suspend_resume_time;
+
+       /*
+        * total_time is calculated from monotonic boottime offsets because,
+        * unlike CLOCK_MONOTONIC, it includes the time spent in suspend.
+        */
+       total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
+
+       /*
+        * suspend_resume_time is the monotonic (CLOCK_MONOTONIC) interval
+        * from just before entering suspend to just after resume.
+        */
+       suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
+
+       /* sleep_time = total_time - suspend_resume_time */
+       sleep_time = timespec_sub(total_time, suspend_resume_time);
+
+       /* Export suspend_resume_time and sleep_time in pair here. */
+       return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
+                               suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
+                               sleep_time.tv_sec, sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+       &resume_reason.attr,
+       &suspend_time.attr,
+       NULL,
+};
+static struct attribute_group attr_group = {
+       .attrs = attrs,
+};
+
+/*
+ * Logs all the wake-up reasons to the kernel log and stores the IRQs
+ * so they can be exposed to userspace via sysfs.
+ */
+void log_wakeup_reason(int irq)
+{
+       struct irq_desc *desc;
+       desc = irq_to_desc(irq);
+       if (desc && desc->action && desc->action->name)
+               printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
+                               desc->action->name);
+       else
+               printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+
+       spin_lock(&resume_reason_lock);
+       if (irqcount == MAX_WAKEUP_REASON_IRQS) {
+               spin_unlock(&resume_reason_lock);
+               printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
+                               MAX_WAKEUP_REASON_IRQS);
+               return;
+       }
+
+       irq_list[irqcount++] = irq;
+       spin_unlock(&resume_reason_lock);
+}
+
+int check_wakeup_reason(int irq)
+{
+       int irq_no;
+       int ret = false;
+
+       spin_lock(&resume_reason_lock);
+       for (irq_no = 0; irq_no < irqcount; irq_no++)
+               if (irq_list[irq_no] == irq) {
+                       ret = true;
+                       break;
+               }
+       spin_unlock(&resume_reason_lock);
+       return ret;
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+       va_list args;
+
+       spin_lock(&resume_reason_lock);
+
+       /* Suspend abort reason has already been logged. */
+       if (suspend_abort) {
+               spin_unlock(&resume_reason_lock);
+               return;
+       }
+
+       suspend_abort = true;
+       va_start(args, fmt);
+       vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+       va_end(args);
+       spin_unlock(&resume_reason_lock);
+}
+
+/* Detects a suspend and clears all the previous wake-up reasons */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+               unsigned long pm_event, void *unused)
+{
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               spin_lock(&resume_reason_lock);
+               irqcount = 0;
+               suspend_abort = false;
+               spin_unlock(&resume_reason_lock);
+               /* monotonic time since boot */
+               last_monotime = ktime_get();
+               /* monotonic time since boot including the time spent in suspend */
+               last_stime = ktime_get_boottime();
+               break;
+       case PM_POST_SUSPEND:
+               /* monotonic time since boot */
+               curr_monotime = ktime_get();
+               /* monotonic time since boot including the time spent in suspend */
+               curr_stime = ktime_get_boottime();
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+       .notifier_call = wakeup_reason_pm_event,
+};
+
+/*
+ * Initializes the sysfs entries and registers the pm_event notifier.
+ */
+int __init wakeup_reason_init(void)
+{
+       int retval;
+
+       retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
+       if (retval)
+               printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
+                               __func__, retval);
+
+       wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+       if (!wakeup_reason) {
+               printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+                               __func__);
+               return 1;
+       }
+       retval = sysfs_create_group(wakeup_reason, &attr_group);
+       if (retval) {
+               kobject_put(wakeup_reason);
+               printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
+                               __func__, retval);
+       }
+       return 0;
+}
+
+late_initcall(wakeup_reason_init);
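
The kobject created above lands under /sys/kernel/wakeup_reasons/ with two read-only attributes, last_resume_reason and last_suspend_time. A small userspace sketch that dumps the last resume reason (the path follows from the code above; the program itself is illustrative):

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/wakeup_reasons/last_resume_reason", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
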
index c048e34b177f2a5e63557f672e9827330c1b34cf..e7e586bb20224d73d782ecac1048402300b98968 100644 (file)
 #include "console_cmdline.h"
 #include "braille.h"
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
 int console_printk[4] = {
        CONSOLE_LOGLEVEL_DEFAULT,       /* console_loglevel */
        MESSAGE_LOGLEVEL_DEFAULT,       /* default_message_loglevel */
@@ -1754,6 +1758,10 @@ asmlinkage int vprintk_emit(int facility, int level,
                }
        }
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+       printascii(text);
+#endif
+
        if (level == LOGLEVEL_DEFAULT)
                level = default_message_loglevel;
 
index be922c9f3d37256fc060d5b9ba0aaf6a2d1f085c..b49cf3ac2d475a40a0558abed478dad5908ec376 100644 (file)
@@ -68,6 +68,7 @@ void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
        RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
                         "suspicious rcu_sync_is_idle() usage");
 }
+EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
 #endif
 
 /**
@@ -82,6 +83,18 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
        rsp->gp_type = type;
 }
 
+/**
+ * Must be called after rcu_sync_init() and before first use.
+ *
+ * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
+ * pairs turn into NO-OPs.
+ */
+void rcu_sync_enter_start(struct rcu_sync *rsp)
+{
+       rsp->gp_count++;
+       rsp->gp_state = GP_PASSED;
+}
+
 /**
  * rcu_sync_enter() - Force readers onto slowpath
  * @rsp: Pointer to rcu_sync structure to use for synchronization
index 67687973ce80d63d3f52698fb4b738b76964b896..623ce4bde0d5123a3960087e7f6c9bb6e6b11c25 100644 (file)
@@ -14,8 +14,11 @@ endif
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o
+obj-$(CONFIG_SCHED_WALT) += walt.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
+obj-$(CONFIG_SCHED_TUNE) += tune.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
index 20253dbc86103a544b2ad875d18c53a2cc36ddd3..1df6da0094f01448ff477f6c1b963b2b6c80c8f6 100644 (file)
@@ -89,6 +89,7 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
+#include "walt.h"
 
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -287,6 +288,18 @@ int sysctl_sched_rt_runtime = 950000;
 /* cpus with isolated domains */
 cpumask_var_t cpu_isolated_map;
 
+struct rq *
+lock_rq_of(struct task_struct *p, unsigned long *flags)
+{
+       return task_rq_lock(p, flags);
+}
+
+void
+unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags)
+{
+       task_rq_unlock(rq, p, flags);
+}
+
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -1076,7 +1089,9 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
 
        dequeue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
+       double_lock_balance(rq, cpu_rq(new_cpu));
        set_task_cpu(p, new_cpu);
+       double_unlock_balance(rq, cpu_rq(new_cpu));
        raw_spin_unlock(&rq->lock);
 
        rq = cpu_rq(new_cpu);
@@ -1300,6 +1315,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                        p->sched_class->migrate_task_rq(p);
                p->se.nr_migrations++;
                perf_event_task_migrate(p);
+
+               walt_fixup_busy_time(p, new_cpu);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -1928,6 +1945,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
        unsigned long flags;
        int cpu, success = 0;
+#ifdef CONFIG_SMP
+       struct rq *rq;
+       u64 wallclock;
+#endif
 
        /*
         * If we are going to wake up a thread waiting for CONDITION we
@@ -2007,6 +2028,14 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         */
        smp_rmb();
 
+       rq = cpu_rq(task_cpu(p));
+
+       raw_spin_lock(&rq->lock);
+       wallclock = walt_ktime_clock();
+       walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+       walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+       raw_spin_unlock(&rq->lock);
+
        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;
 
@@ -2014,10 +2043,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                p->sched_class->task_waking(p);
 
        cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
+
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
        }
+
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu);
@@ -2066,8 +2097,13 @@ static void try_to_wake_up_local(struct task_struct *p)
 
        trace_sched_waking(p);
 
-       if (!task_on_rq_queued(p))
+       if (!task_on_rq_queued(p)) {
+               u64 wallclock = walt_ktime_clock();
+
+               walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+               walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+       }
 
        ttwu_do_wakeup(rq, p, 0);
        ttwu_stat(p, smp_processor_id(), 0);
@@ -2133,6 +2169,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
        INIT_LIST_HEAD(&p->se.group_node);
+       walt_init_new_task_load(p);
 
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -2400,6 +2437,9 @@ void wake_up_new_task(struct task_struct *p)
        struct rq *rq;
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
+
+       walt_init_new_task_load(p);
+
        /* Initialize new task's runnable average */
        init_entity_runnable_average(&p->se);
 #ifdef CONFIG_SMP
@@ -2412,7 +2452,8 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
        rq = __task_rq_lock(p);
-       activate_task(rq, p, 0);
+       walt_mark_task_starting(p);
+       activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
@@ -2793,6 +2834,36 @@ unsigned long nr_iowait_cpu(int cpu)
        return atomic_read(&this->nr_iowait);
 }
 
+#ifdef CONFIG_CPU_QUIET
+u64 nr_running_integral(unsigned int cpu)
+{
+       unsigned int seqcnt;
+       u64 integral;
+       struct rq *q;
+
+       if (cpu >= nr_cpu_ids)
+               return 0;
+
+       q = cpu_rq(cpu);
+
+       /*
+        * Update the average to avoid reading a stale value if there were
+        * no run-queue changes for a long time. On the other hand, if the
+        * changes are happening right now, just read the current value
+        * directly.
+        */
+
+       seqcnt = read_seqcount_begin(&q->ave_seqcnt);
+       integral = do_nr_running_integral(q);
+       if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
+               read_seqcount_begin(&q->ave_seqcnt);
+               integral = q->nr_running_integral;
+       }
+
+       return integral;
+}
+#endif
+
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
        struct rq *rq = this_rq();
@@ -2879,6 +2950,93 @@ unsigned long long task_sched_runtime(struct task_struct *p)
        return ns;
 }
 
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+
+static inline
+unsigned long add_capacity_margin(unsigned long cpu_capacity)
+{
+       cpu_capacity  = cpu_capacity * capacity_margin;
+       cpu_capacity /= SCHED_CAPACITY_SCALE;
+       return cpu_capacity;
+}
+
+static inline
+unsigned long sum_capacity_reqs(unsigned long cfs_cap,
+                               struct sched_capacity_reqs *scr)
+{
+       unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
+       return total += scr->dl;
+}
+
+static void sched_freq_tick_pelt(int cpu)
+{
+       unsigned long cpu_utilization = capacity_max;
+       unsigned long capacity_curr = capacity_curr_of(cpu);
+       struct sched_capacity_reqs *scr;
+
+       scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+       if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
+               return;
+
+       /*
+        * To leave free room for a task that is building up its "real"
+        * utilization, and to harm its performance as little as possible,
+        * request a jump to a higher OPP as soon as the margin of free
+        * capacity (specified by capacity_margin) is impacted.
+        */
+       set_cfs_cpu_capacity(cpu, true, cpu_utilization);
+}
+
+#ifdef CONFIG_SCHED_WALT
+static void sched_freq_tick_walt(int cpu)
+{
+       unsigned long cpu_utilization = cpu_util(cpu);
+       unsigned long capacity_curr = capacity_curr_of(cpu);
+
+       if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
+               return sched_freq_tick_pelt(cpu);
+
+       /*
+        * Add a margin to the WALT utilization.
+        * NOTE: WALT tracks a single CPU signal for all the scheduling
+        * classes, thus this margin is going to be added to the DL class as
+        * well, which is something we do not do in the sched_freq_tick_pelt case.
+        */
+       cpu_utilization = add_capacity_margin(cpu_utilization);
+       if (cpu_utilization <= capacity_curr)
+               return;
+
+       /*
+        * It is likely that the load is growing so we
+        * keep the added margin in our request as an
+        * extra boost.
+        */
+       set_cfs_cpu_capacity(cpu, true, cpu_utilization);
+
+}
+#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
+#else
+#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
+#endif /* CONFIG_SCHED_WALT */
+
+static void sched_freq_tick(int cpu)
+{
+       unsigned long capacity_orig, capacity_curr;
+
+       if (!sched_freq())
+               return;
+
+       capacity_orig = capacity_orig_of(cpu);
+       capacity_curr = capacity_curr_of(cpu);
+       if (capacity_curr == capacity_orig)
+               return;
+
+       _sched_freq_tick(cpu);
+}
+#else
+static inline void sched_freq_tick(int cpu) { }
+#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
+
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -2892,10 +3050,14 @@ void scheduler_tick(void)
        sched_clock_tick();
 
        raw_spin_lock(&rq->lock);
+       walt_set_window_start(rq);
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_cpu_load_active(rq);
+       walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
+                       walt_ktime_clock(), 0);
        calc_global_load_tick(rq);
+       sched_freq_tick(cpu);
        raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick();
@@ -3132,6 +3294,7 @@ static void __sched notrace __schedule(bool preempt)
        unsigned long *switch_count;
        struct rq *rq;
        int cpu;
+       u64 wallclock;
 
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
@@ -3193,6 +3356,9 @@ static void __sched notrace __schedule(bool preempt)
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
+       wallclock = walt_ktime_clock();
+       walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+       walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
        rq->clock_skip_update = 0;
@@ -5019,6 +5185,7 @@ void init_idle(struct task_struct *idle, int cpu)
        raw_spin_lock(&rq->lock);
 
        __sched_fork(0, idle);
+
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
@@ -5400,10 +5567,61 @@ set_table_entry(struct ctl_table *entry,
        }
 }
 
+static struct ctl_table *
+sd_alloc_ctl_energy_table(struct sched_group_energy *sge)
+{
+       struct ctl_table *table = sd_alloc_ctl_entry(5);
+
+       if (table == NULL)
+               return NULL;
+
+       set_table_entry(&table[0], "nr_idle_states", &sge->nr_idle_states,
+                       sizeof(int), 0644, proc_dointvec_minmax, false);
+       set_table_entry(&table[1], "idle_states", &sge->idle_states[0].power,
+                       sge->nr_idle_states*sizeof(struct idle_state), 0644,
+                       proc_doulongvec_minmax, false);
+       set_table_entry(&table[2], "nr_cap_states", &sge->nr_cap_states,
+                       sizeof(int), 0644, proc_dointvec_minmax, false);
+       set_table_entry(&table[3], "cap_states", &sge->cap_states[0].cap,
+                       sge->nr_cap_states*sizeof(struct capacity_state), 0644,
+                       proc_doulongvec_minmax, false);
+
+       return table;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_group_table(struct sched_group *sg)
+{
+       struct ctl_table *table = sd_alloc_ctl_entry(2);
+
+       if (table == NULL)
+               return NULL;
+
+       table->procname = kstrdup("energy", GFP_KERNEL);
+       table->mode = 0555;
+       table->child = sd_alloc_ctl_energy_table((struct sched_group_energy *)sg->sge);
+
+       return table;
+}
+
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-       struct ctl_table *table = sd_alloc_ctl_entry(14);
+       struct ctl_table *table;
+       unsigned int nr_entries = 14;
+
+       int i = 0;
+       struct sched_group *sg = sd->groups;
+
+       if (sg->sge) {
+               int nr_sgs = 0;
+
+               do {} while (nr_sgs++, sg = sg->next, sg != sd->groups);
+
+               nr_entries += nr_sgs;
+       }
+
+       table = sd_alloc_ctl_entry(nr_entries);
 
        if (table == NULL)
                return NULL;
@@ -5436,7 +5654,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
                sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[12], "name", sd->name,
                CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-       /* &table[13] is terminator */
+       sg = sd->groups;
+       if (sg->sge) {
+               char buf[32];
+               struct ctl_table *entry = &table[13];
+
+               do {
+                       snprintf(buf, 32, "group%d", i);
+                       entry->procname = kstrdup(buf, GFP_KERNEL);
+                       entry->mode = 0555;
+                       entry->child = sd_alloc_ctl_group_table(sg);
+               } while (entry++, i++, sg = sg->next, sg != sd->groups);
+       }
+       /* &table[nr_entries-1] is terminator */
 
        return table;
 }
@@ -5552,6 +5782,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               walt_set_window_start(rq);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                rq->calc_load_update = calc_load_update;
                account_reset_rq(rq);
                break;
@@ -5572,6 +5805,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                sched_ttwu_pending();
                /* Update our root-domain */
                raw_spin_lock_irqsave(&rq->lock, flags);
+               walt_migrate_sync_cpu(cpu);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
@@ -5743,7 +5977,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                printk(KERN_CONT " %*pbl",
                       cpumask_pr_args(sched_group_cpus(group)));
                if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-                       printk(KERN_CONT " (cpu_capacity = %d)",
+                       printk(KERN_CONT " (cpu_capacity = %lu)",
                                group->sgc->capacity);
                }
 
@@ -5804,7 +6038,8 @@ static int sd_degenerate(struct sched_domain *sd)
                         SD_BALANCE_EXEC |
                         SD_SHARE_CPUCAPACITY |
                         SD_SHARE_PKG_RESOURCES |
-                        SD_SHARE_POWERDOMAIN)) {
+                        SD_SHARE_POWERDOMAIN |
+                        SD_SHARE_CAP_STATES)) {
                if (sd->groups != sd->groups->next)
                        return 0;
        }
@@ -5836,7 +6071,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
                                SD_SHARE_CPUCAPACITY |
                                SD_SHARE_PKG_RESOURCES |
                                SD_PREFER_SIBLING |
-                               SD_SHARE_POWERDOMAIN);
+                               SD_SHARE_POWERDOMAIN |
+                               SD_SHARE_CAP_STATES);
                if (nr_node_ids == 1)
                        pflags &= ~SD_SERIALIZE;
        }
@@ -5915,6 +6151,8 @@ static int init_rootdomain(struct root_domain *rd)
 
        if (cpupri_init(&rd->cpupri) != 0)
                goto free_rto_mask;
+
+       init_max_cpu_capacity(&rd->max_cpu_capacity);
        return 0;
 
 free_rto_mask:
@@ -6020,11 +6258,13 @@ DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_PER_CPU(struct sched_domain *, sd_ea);
+DEFINE_PER_CPU(struct sched_domain *, sd_scs);
 
 static void update_top_cache_domain(int cpu)
 {
        struct sched_domain *sd;
-       struct sched_domain *busy_sd = NULL;
+       struct sched_domain *busy_sd = NULL, *ea_sd = NULL;
        int id = cpu;
        int size = 1;
 
@@ -6045,6 +6285,17 @@ static void update_top_cache_domain(int cpu)
 
        sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
        rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+
+       for_each_domain(cpu, sd) {
+               if (sd->groups->sge)
+                       ea_sd = sd;
+               else
+                       break;
+       }
+       rcu_assign_pointer(per_cpu(sd_ea, cpu), ea_sd);
+
+       sd = highest_flag_domain(cpu, SD_SHARE_CAP_STATES);
+       rcu_assign_pointer(per_cpu(sd_scs, cpu), sd);
 }
 
 /*
@@ -6205,6 +6456,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 * die on a /0 trap.
                 */
                sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+               sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
 
                /*
                 * Make sure the first group of this domain contains the
@@ -6333,6 +6585,66 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
        atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
+/*
+ * Check that the per-cpu provided sd energy data is consistent for all cpus
+ * within the mask.
+ */
+static inline void check_sched_energy_data(int cpu, sched_domain_energy_f fn,
+                                          const struct cpumask *cpumask)
+{
+       const struct sched_group_energy * const sge = fn(cpu);
+       struct cpumask mask;
+       int i;
+
+       if (cpumask_weight(cpumask) <= 1)
+               return;
+
+       cpumask_xor(&mask, cpumask, get_cpu_mask(cpu));
+
+       for_each_cpu(i, &mask) {
+               const struct sched_group_energy * const e = fn(i);
+               int y;
+
+               BUG_ON(e->nr_idle_states != sge->nr_idle_states);
+
+               for (y = 0; y < (e->nr_idle_states); y++) {
+                       BUG_ON(e->idle_states[y].power !=
+                                       sge->idle_states[y].power);
+               }
+
+               BUG_ON(e->nr_cap_states != sge->nr_cap_states);
+
+               for (y = 0; y < (e->nr_cap_states); y++) {
+                       BUG_ON(e->cap_states[y].cap != sge->cap_states[y].cap);
+                       BUG_ON(e->cap_states[y].power !=
+                                       sge->cap_states[y].power);
+               }
+       }
+}
+
+static void init_sched_energy(int cpu, struct sched_domain *sd,
+                             sched_domain_energy_f fn)
+{
+       if (!(fn && fn(cpu)))
+               return;
+
+       if (cpu != group_balance_cpu(sd->groups))
+               return;
+
+       if (sd->child && !sd->child->groups->sge) {
+               pr_err("BUG: EAS setup broken for CPU%d\n", cpu);
+#ifdef CONFIG_SCHED_DEBUG
+               pr_err("     energy data on %s but not on %s domain\n",
+                       sd->name, sd->child->name);
+#endif
+               return;
+       }
+
+       check_sched_energy_data(cpu, fn, sched_group_cpus(sd->groups));
+
+       sd->groups->sge = fn(cpu);
+}
+
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -6441,6 +6753,7 @@ static int sched_domains_curr_level;
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
  * SD_SHARE_POWERDOMAIN   - describes shared power domain
+ * SD_SHARE_CAP_STATES    - describes shared capacity states
  *
  * Odd one out:
  * SD_ASYM_PACKING        - describes SMT quirks
@@ -6450,7 +6763,8 @@ static int sched_domains_curr_level;
         SD_SHARE_PKG_RESOURCES |       \
         SD_NUMA |                      \
         SD_ASYM_PACKING |              \
-        SD_SHARE_POWERDOMAIN)
+        SD_SHARE_POWERDOMAIN |         \
+        SD_SHARE_CAP_STATES)
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl, int cpu)
@@ -7000,6 +7314,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        enum s_alloc alloc_state;
        struct sched_domain *sd;
        struct s_data d;
+       struct rq *rq = NULL;
        int i, ret = -ENOMEM;
 
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
@@ -7038,10 +7353,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
        /* Calculate CPU capacity for physical packages and nodes */
        for (i = nr_cpumask_bits-1; i >= 0; i--) {
+               struct sched_domain_topology_level *tl = sched_domain_topology;
+
                if (!cpumask_test_cpu(i, cpu_map))
                        continue;
 
-               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+               for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) {
+                       init_sched_energy(i, sd, tl->energy);
                        claim_allocations(i, sd);
                        init_sched_groups_capacity(i, sd);
                }
@@ -7050,6 +7368,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        /* Attach the domains */
        rcu_read_lock();
        for_each_cpu(i, cpu_map) {
+               rq = cpu_rq(i);
                sd = *per_cpu_ptr(d.sd, i);
                cpu_attach_domain(sd, d.rd, i);
        }
@@ -7331,6 +7650,7 @@ void __init sched_init_smp(void)
 {
        cpumask_var_t non_isolated_cpus;
 
+       walt_init_cpu_efficiency();
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
@@ -7508,6 +7828,11 @@ void __init sched_init(void)
                rq->idle_stamp = 0;
                rq->avg_idle = 2*sysctl_sched_migration_cost;
                rq->max_idle_balance_cost = sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_WALT
+               rq->cur_irqload = 0;
+               rq->avg_irqload = 0;
+               rq->irqload_ts = 0;
+#endif
 
                INIT_LIST_HEAD(&rq->cfs_tasks);
 
@@ -7571,6 +7896,14 @@ static inline int preempt_count_equals(int preempt_offset)
        return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+       __might_sleep_init_called = 1;
+       return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
        /*
@@ -7595,8 +7928,10 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-            !is_idle_task(current)) ||
-           system_state != SYSTEM_RUNNING || oops_in_progress)
+            !is_idle_task(current)) || oops_in_progress)
+               return;
+       if (system_state != SYSTEM_RUNNING &&
+           (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
new file mode 100644 (file)
index 0000000..d751bc2
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ *  Copyright (C)  2015 Michael Turquette <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+#include <linux/irq_work.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_sched.h>
+
+#include "sched.h"
+
+#define THROTTLE_DOWN_NSEC     50000000 /* 50ms default */
+#define THROTTLE_UP_NSEC       500000 /* 500us default */
+
+struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
+static bool __read_mostly cpufreq_driver_slow;
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
+static struct cpufreq_governor cpufreq_gov_sched;
+#endif
+
+static DEFINE_PER_CPU(unsigned long, enabled);
+DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
+
+/**
+ * gov_data - per-policy data internal to the governor
+ * @up_throttle: next throttling period expiry if increasing OPP
+ * @down_throttle: next throttling period expiry if decreasing OPP
+ * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
+ * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
+ * @task: worker thread for dvfs transition that may block/sleep
+ * @irq_work: callback used to wake up worker thread
+ * @requested_freq: last frequency requested by the sched governor
+ *
+ * struct gov_data is the per-policy cpufreq_sched-specific data structure. A
+ * per-policy instance of it is created when the cpufreq_sched governor receives
+ * the CPUFREQ_GOV_START condition and a pointer to it exists in the gov_data
+ * member of struct cpufreq_policy.
+ *
+ * Readers of this data must call down_read(policy->rwsem). Writers must
+ * call down_write(policy->rwsem).
+ */
+struct gov_data {
+       ktime_t up_throttle;
+       ktime_t down_throttle;
+       unsigned int up_throttle_nsec;
+       unsigned int down_throttle_nsec;
+       struct task_struct *task;
+       struct irq_work irq_work;
+       unsigned int requested_freq;
+};
+
+static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
+                                           unsigned int freq)
+{
+       struct gov_data *gd = policy->governor_data;
+
+       /* avoid race with cpufreq_sched_stop */
+       if (!down_write_trylock(&policy->rwsem))
+               return;
+
+       __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
+
+       gd->up_throttle = ktime_add_ns(ktime_get(), gd->up_throttle_nsec);
+       gd->down_throttle = ktime_add_ns(ktime_get(), gd->down_throttle_nsec);
+       up_write(&policy->rwsem);
+}
+
+static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
+{
+       ktime_t now = ktime_get();
+
+       ktime_t throttle = gd->requested_freq < cur_freq ?
+               gd->down_throttle : gd->up_throttle;
+
+       if (ktime_after(now, throttle))
+               return false;
+
+       while (1) {
+               int usec_left = ktime_to_ns(ktime_sub(throttle, now));
+
+               usec_left /= NSEC_PER_USEC;
+               trace_cpufreq_sched_throttled(usec_left);
+               usleep_range(usec_left, usec_left + 100);
+               now = ktime_get();
+               if (ktime_after(now, throttle))
+                       return true;
+       }
+}
+
+/*
+ * we pass in struct cpufreq_policy. This is safe because changing out the
+ * policy requires a call to __cpufreq_governor(policy, CPUFREQ_GOV_STOP),
+ * which tears down all of the data structures and __cpufreq_governor(policy,
+ * CPUFREQ_GOV_START) will do a full rebuild, including this kthread with the
+ * new policy pointer
+ */
+static int cpufreq_sched_thread(void *data)
+{
+       struct sched_param param;
+       struct cpufreq_policy *policy;
+       struct gov_data *gd;
+       unsigned int new_request = 0;
+       unsigned int last_request = 0;
+       int ret;
+
+       policy = (struct cpufreq_policy *) data;
+       gd = policy->governor_data;
+
+       param.sched_priority = 50;
+       ret = sched_setscheduler_nocheck(gd->task, SCHED_FIFO, &param);
+       if (ret) {
+               pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+               do_exit(-EINVAL);
+       } else {
+               pr_debug("%s: kthread (%d) set to SCHED_FIFO\n",
+                               __func__, gd->task->pid);
+       }
+
+       do {
+               new_request = gd->requested_freq;
+               if (new_request == last_request) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       if (kthread_should_stop())
+                               break;
+                       schedule();
+               } else {
+                       /*
+                        * if the frequency thread sleeps while waiting to be
+                        * unthrottled, start over to check for a newer request
+                        */
+                       if (finish_last_request(gd, policy->cur))
+                               continue;
+                       last_request = new_request;
+                       cpufreq_sched_try_driver_target(policy, new_request);
+               }
+       } while (!kthread_should_stop());
+
+       return 0;
+}
+
+static void cpufreq_sched_irq_work(struct irq_work *irq_work)
+{
+       struct gov_data *gd;
+
+       gd = container_of(irq_work, struct gov_data, irq_work);
+       if (!gd)
+               return;
+
+       wake_up_process(gd->task);
+}
+
+static void update_fdomain_capacity_request(int cpu)
+{
+       unsigned int freq_new, index_new, cpu_tmp;
+       struct cpufreq_policy *policy;
+       struct gov_data *gd;
+       unsigned long capacity = 0;
+
+       /*
+        * Avoid grabbing the policy if possible. A test is still
+        * required after locking the CPU's policy to avoid racing
+        * with the governor changing.
+        */
+       if (!per_cpu(enabled, cpu))
+               return;
+
+       policy = cpufreq_cpu_get(cpu);
+       if (IS_ERR_OR_NULL(policy))
+               return;
+
+       if (policy->governor != &cpufreq_gov_sched ||
+           !policy->governor_data)
+               goto out;
+
+       gd = policy->governor_data;
+
+       /* find max capacity requested by cpus in this policy */
+       for_each_cpu(cpu_tmp, policy->cpus) {
+               struct sched_capacity_reqs *scr;
+
+               scr = &per_cpu(cpu_sched_capacity_reqs, cpu_tmp);
+               capacity = max(capacity, scr->total);
+       }
+
+       /* Convert the new maximum capacity request into a cpu frequency */
+       freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
+       if (cpufreq_frequency_table_target(policy, policy->freq_table,
+                                          freq_new, CPUFREQ_RELATION_L,
+                                          &index_new))
+               goto out;
+       freq_new = policy->freq_table[index_new].frequency;
+
+       if (freq_new > policy->max)
+               freq_new = policy->max;
+
+       if (freq_new < policy->min)
+               freq_new = policy->min;
+
+       trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
+                                       gd->requested_freq);
+       if (freq_new == gd->requested_freq)
+               goto out;
+
+       gd->requested_freq = freq_new;
+
+       /*
+        * Throttling is not yet supported on platforms with fast cpufreq
+        * drivers.
+        */
+       if (cpufreq_driver_slow)
+               irq_work_queue_on(&gd->irq_work, cpu);
+       else
+               cpufreq_sched_try_driver_target(policy, freq_new);
+
+out:
+       cpufreq_cpu_put(policy);
+}
+
+void update_cpu_capacity_request(int cpu, bool request)
+{
+       unsigned long new_capacity;
+       struct sched_capacity_reqs *scr;
+
+       /* The rq lock serializes access to the CPU's sched_capacity_reqs. */
+       lockdep_assert_held(&cpu_rq(cpu)->lock);
+
+       scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+
+       new_capacity = scr->cfs + scr->rt;
+       new_capacity = new_capacity * capacity_margin
+               / SCHED_CAPACITY_SCALE;
+       new_capacity += scr->dl;
+
+       if (new_capacity == scr->total)
+               return;
+
+       trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
+
+       scr->total = new_capacity;
+       if (request)
+               update_fdomain_capacity_request(cpu);
+}
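For reference, the aggregation above applies the ~20% capacity_margin (1280/1024, defined in the fair class later in this series) to the CFS and RT requests, then adds the deadline bandwidth unscaled. A stand-alone sketch with hypothetical per-class values:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024

static const unsigned long capacity_margin = 1280;	/* ~20% headroom */

/* Mirrors update_cpu_capacity_request(): margin on cfs + rt, dl added on top. */
static unsigned long total_capacity_request(unsigned long cfs, unsigned long rt,
					    unsigned long dl)
{
	unsigned long total = (cfs + rt) * capacity_margin / SCHED_CAPACITY_SCALE;

	return total + dl;
}

int main(void)
{
	/* Hypothetical requests: cfs=300, rt=100, dl=50 -> 500 + 50 = 550. */
	printf("%lu\n", total_capacity_request(300, 100, 50));
	return 0;
}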
+
+static inline void set_sched_freq(void)
+{
+       static_key_slow_inc(&__sched_freq);
+}
+
+static inline void clear_sched_freq(void)
+{
+       static_key_slow_dec(&__sched_freq);
+}
+
+static struct attribute_group sched_attr_group_gov_pol;
+static struct attribute_group *get_sysfs_attr(void)
+{
+       return &sched_attr_group_gov_pol;
+}
+
+static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
+{
+       struct gov_data *gd;
+       int cpu;
+       int rc;
+
+       for_each_cpu(cpu, policy->cpus)
+               memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
+                      sizeof(struct sched_capacity_reqs));
+
+       gd = kzalloc(sizeof(*gd), GFP_KERNEL);
+       if (!gd)
+               return -ENOMEM;
+
+       gd->up_throttle_nsec = policy->cpuinfo.transition_latency ?
+                           policy->cpuinfo.transition_latency :
+                           THROTTLE_UP_NSEC;
+       gd->down_throttle_nsec = THROTTLE_DOWN_NSEC;
+       pr_debug("%s: throttle threshold = %u [ns]\n",
+                 __func__, gd->up_throttle_nsec);
+
+       rc = sysfs_create_group(&policy->kobj, get_sysfs_attr());
+       if (rc) {
+               pr_err("%s: couldn't create sysfs attributes: %d\n", __func__, rc);
+               goto err;
+       }
+
+       policy->governor_data = gd;
+       if (cpufreq_driver_is_slow()) {
+               cpufreq_driver_slow = true;
+               gd->task = kthread_create(cpufreq_sched_thread, policy,
+                                         "kschedfreq:%d",
+                                         cpumask_first(policy->related_cpus));
+               if (IS_ERR_OR_NULL(gd->task)) {
+                       pr_err("%s: failed to create kschedfreq thread\n",
+                              __func__);
+                       goto err;
+               }
+               get_task_struct(gd->task);
+               kthread_bind_mask(gd->task, policy->related_cpus);
+               wake_up_process(gd->task);
+               init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
+       }
+
+       set_sched_freq();
+
+       return 0;
+
+err:
+       policy->governor_data = NULL;
+       kfree(gd);
+       return -ENOMEM;
+}
+
+static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
+{
+       struct gov_data *gd = policy->governor_data;
+
+       clear_sched_freq();
+       if (cpufreq_driver_slow) {
+               kthread_stop(gd->task);
+               put_task_struct(gd->task);
+       }
+
+       sysfs_remove_group(&policy->kobj, get_sysfs_attr());
+
+       policy->governor_data = NULL;
+
+       kfree(gd);
+       return 0;
+}
+
+static int cpufreq_sched_start(struct cpufreq_policy *policy)
+{
+       int cpu;
+
+       for_each_cpu(cpu, policy->cpus)
+               per_cpu(enabled, cpu) = 1;
+
+       return 0;
+}
+
+static void cpufreq_sched_limits(struct cpufreq_policy *policy)
+{
+       unsigned int clamp_freq;
+       struct gov_data *gd = policy->governor_data;
+
+       pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+               policy->cpu, policy->min, policy->max,
+               policy->cur);
+
+       clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);
+
+       if (policy->cur != clamp_freq)
+               __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
+}
+
+static int cpufreq_sched_stop(struct cpufreq_policy *policy)
+{
+       int cpu;
+
+       for_each_cpu(cpu, policy->cpus)
+               per_cpu(enabled, cpu) = 0;
+
+       return 0;
+}
+
+static int cpufreq_sched_setup(struct cpufreq_policy *policy,
+                              unsigned int event)
+{
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               return cpufreq_sched_policy_init(policy);
+       case CPUFREQ_GOV_POLICY_EXIT:
+               return cpufreq_sched_policy_exit(policy);
+       case CPUFREQ_GOV_START:
+               return cpufreq_sched_start(policy);
+       case CPUFREQ_GOV_STOP:
+               return cpufreq_sched_stop(policy);
+       case CPUFREQ_GOV_LIMITS:
+               cpufreq_sched_limits(policy);
+               break;
+       }
+       return 0;
+}
+
+/* Tunables */
+static ssize_t show_up_throttle_nsec(struct gov_data *gd, char *buf)
+{
+       return sprintf(buf, "%u\n", gd->up_throttle_nsec);
+}
+
+static ssize_t store_up_throttle_nsec(struct gov_data *gd,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       gd->up_throttle_nsec = val;
+       return count;
+}
+
+static ssize_t show_down_throttle_nsec(struct gov_data *gd, char *buf)
+{
+       return sprintf(buf, "%u\n", gd->down_throttle_nsec);
+}
+
+static ssize_t store_down_throttle_nsec(struct gov_data *gd,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       gd->down_throttle_nsec = val;
+       return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)                                    \
+static ssize_t show_##file_name##_gov_pol                              \
+(struct cpufreq_policy *policy, char *buf)                             \
+{                                                                      \
+       return show_##file_name(policy->governor_data, buf);            \
+}
+
+#define store_gov_pol_sys(file_name)                                   \
+static ssize_t store_##file_name##_gov_pol                             \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
+{                                                                      \
+       return store_##file_name(policy->governor_data, buf, count);    \
+}
+
+#define gov_pol_attr_rw(_name)                                         \
+       static struct freq_attr _name##_gov_pol =                               \
+       __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define show_store_gov_pol_sys(file_name)                              \
+       show_gov_pol_sys(file_name);                                            \
+       store_gov_pol_sys(file_name)
+#define tunable_handlers(file_name) \
+       show_gov_pol_sys(file_name); \
+       store_gov_pol_sys(file_name); \
+       gov_pol_attr_rw(file_name)
+
+tunable_handlers(down_throttle_nsec);
+tunable_handlers(up_throttle_nsec);
+
+/* Per policy governor instance */
+static struct attribute *sched_attributes_gov_pol[] = {
+       &up_throttle_nsec_gov_pol.attr,
+       &down_throttle_nsec_gov_pol.attr,
+       NULL,
+};
+
+static struct attribute_group sched_attr_group_gov_pol = {
+       .attrs = sched_attributes_gov_pol,
+       .name = "sched",
+};
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
+static
+#endif
+struct cpufreq_governor cpufreq_gov_sched = {
+       .name                   = "sched",
+       .governor               = cpufreq_sched_setup,
+       .owner                  = THIS_MODULE,
+};
+
+static int __init cpufreq_sched_init(void)
+{
+       int cpu;
+
+       for_each_cpu(cpu, cpu_possible_mask)
+               per_cpu(enabled, cpu) = 0;
+       return cpufreq_register_governor(&cpufreq_gov_sched);
+}
+
+/* Try to make this the default governor */
+fs_initcall(cpufreq_sched_init);
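For reference, update_fdomain_capacity_request() above maps the aggregate capacity request onto a frequency with capacity * policy->max >> SCHED_CAPACITY_SHIFT, snaps it up to the next table entry (CPUFREQ_RELATION_L) and clamps it to the policy limits. A stand-alone sketch of that arithmetic; the OPP table and limits are hypothetical, not taken from the patch:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* capacity lives on a 0..1024 scale */

/* Hypothetical policy limits and frequency table (kHz), for illustration only. */
static const unsigned int freq_table[] = { 400000, 800000, 1200000, 1800000 };
static const unsigned int policy_min = 400000, policy_max = 1800000;

static unsigned int capacity_to_freq(unsigned long capacity)
{
	unsigned long freq = (capacity * policy_max) >> SCHED_CAPACITY_SHIFT;
	unsigned int i, n = sizeof(freq_table) / sizeof(freq_table[0]);

	/* CPUFREQ_RELATION_L: lowest table entry at or above the request. */
	for (i = 0; i < n - 1; i++)
		if (freq_table[i] >= freq)
			break;
	freq = freq_table[i];

	if (freq > policy_max)
		freq = policy_max;
	if (freq < policy_min)
		freq = policy_min;

	return freq;
}

int main(void)
{
	/* A 640/1024 request against a 1.8 GHz policy -> 1125000 kHz -> 1.2 GHz OPP. */
	printf("%u kHz\n", capacity_to_freq(640));
	return 0;
}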
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a1aecbedf5b14e75f19dd644157f540ecd08a5b5..acde1d7c763ceeafdcb6cd0f009c95adccda6332 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/static_key.h>
 #include <linux/context_tracking.h>
 #include "sched.h"
+#include "walt.h"
 
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -49,6 +50,10 @@ void irqtime_account_irq(struct task_struct *curr)
        unsigned long flags;
        s64 delta;
        int cpu;
+#ifdef CONFIG_SCHED_WALT
+       u64 wallclock;
+       bool account = true;
+#endif
 
        if (!sched_clock_irqtime)
                return;
@@ -56,6 +61,9 @@ void irqtime_account_irq(struct task_struct *curr)
        local_irq_save(flags);
 
        cpu = smp_processor_id();
+#ifdef CONFIG_SCHED_WALT
+       wallclock = sched_clock_cpu(cpu);
+#endif
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);
 
@@ -70,8 +78,16 @@ void irqtime_account_irq(struct task_struct *curr)
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);
+#ifdef CONFIG_SCHED_WALT
+       else
+               account = false;
+#endif
 
        irq_time_write_end();
+#ifdef CONFIG_SCHED_WALT
+       if (account)
+               walt_account_irqtime(cpu, curr, delta, wallclock);
+#endif
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e285f9121ccd5540fa11eef49c94f017c1..9d9eb50d40598a1c0dc0897777c0a81882383a21 100644 (file)
@@ -43,6 +43,24 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
        return !RB_EMPTY_NODE(&dl_se->rb_node);
 }
 
+static void add_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+       u64 se_bw = dl_se->dl_bw;
+
+       dl_rq->avg_bw += se_bw;
+}
+
+static void clear_average_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+       u64 se_bw = dl_se->dl_bw;
+
+       dl_rq->avg_bw -= se_bw;
+       if (dl_rq->avg_bw < 0) {
+               WARN_ON(1);
+               dl_rq->avg_bw = 0;
+       }
+}
+
 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
        struct sched_dl_entity *dl_se = &p->dl;
@@ -494,6 +512,9 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
 
+       if (dl_se->dl_new)
+               add_average_bw(dl_se, dl_rq);
+
        /*
         * The arrival of a new instance needs special treatment, i.e.,
         * the actual scheduling parameters have to be "renewed".
@@ -741,8 +762,6 @@ static void update_curr_dl(struct rq *rq)
        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);
 
-       sched_rt_avg_update(rq, delta_exec);
-
        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
        if (dl_runtime_exceeded(dl_se)) {
                dl_se->dl_throttled = 1;
@@ -1241,6 +1260,8 @@ static void task_fork_dl(struct task_struct *p)
 static void task_dead_dl(struct task_struct *p)
 {
        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+       struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
+       struct rq *rq = rq_of_dl_rq(dl_rq);
 
        /*
         * Since we are TASK_DEAD we won't slip out of the domain!
@@ -1249,6 +1270,8 @@ static void task_dead_dl(struct task_struct *p)
        /* XXX we should retain the bw until 0-lag */
        dl_b->total_bw -= p->dl.dl_bw;
        raw_spin_unlock_irq(&dl_b->lock);
+
+       clear_average_bw(&p->dl, &rq->dl);
 }
 
 static void set_curr_task_dl(struct rq *rq)
@@ -1556,7 +1579,9 @@ retry:
        }
 
        deactivate_task(rq, next_task, 0);
+       clear_average_bw(&next_task->dl, &rq->dl);
        set_task_cpu(next_task, later_rq->cpu);
+       add_average_bw(&next_task->dl, &later_rq->dl);
        activate_task(later_rq, next_task, 0);
        ret = 1;
 
@@ -1644,7 +1669,9 @@ static void pull_dl_task(struct rq *this_rq)
                        resched = true;
 
                        deactivate_task(src_rq, p, 0);
+                       clear_average_bw(&p->dl, &src_rq->dl);
                        set_task_cpu(p, this_cpu);
+                       add_average_bw(&p->dl, &this_rq->dl);
                        activate_task(this_rq, p, 0);
                        dmin = p->dl.deadline;
 
@@ -1750,6 +1777,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
        if (!start_dl_timer(p))
                __dl_clear_params(p);
 
+       clear_average_bw(&p->dl, &rq->dl);
+
        /*
         * Since this might be the only -deadline task on the rq,
         * this is the right place to try to pull some other one
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
new file mode 100644 (file)
index 0000000..b0656b7
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Obtain energy cost data from DT and populate relevant scheduler data
+ * structures.
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#define pr_fmt(fmt) "sched-energy: " fmt
+
+#define DEBUG
+
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/sched_energy.h>
+#include <linux/stddef.h>
+
+struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
+
+static void free_resources(void)
+{
+       int cpu, sd_level;
+       struct sched_group_energy *sge;
+
+       for_each_possible_cpu(cpu) {
+               for_each_possible_sd_level(sd_level) {
+                       sge = sge_array[cpu][sd_level];
+                       if (sge) {
+                               kfree(sge->cap_states);
+                               kfree(sge->idle_states);
+                               kfree(sge);
+                       }
+               }
+       }
+}
+
+void init_sched_energy_costs(void)
+{
+       struct device_node *cn, *cp;
+       struct capacity_state *cap_states;
+       struct idle_state *idle_states;
+       struct sched_group_energy *sge;
+       const struct property *prop;
+       int sd_level, i, nstates, cpu;
+       const __be32 *val;
+
+       for_each_possible_cpu(cpu) {
+               cn = of_get_cpu_node(cpu, NULL);
+               if (!cn) {
+                       pr_warn("CPU device node missing for CPU %d\n", cpu);
+                       return;
+               }
+
+               if (!of_find_property(cn, "sched-energy-costs", NULL)) {
+                       pr_warn("CPU device node has no sched-energy-costs\n");
+                       return;
+               }
+
+               for_each_possible_sd_level(sd_level) {
+                       cp = of_parse_phandle(cn, "sched-energy-costs", sd_level);
+                       if (!cp)
+                               break;
+
+                       prop = of_find_property(cp, "busy-cost-data", NULL);
+                       if (!prop || !prop->value) {
+                               pr_warn("No busy-cost data, skipping sched_energy init\n");
+                               goto out;
+                       }
+
+                       sge = kcalloc(1, sizeof(struct sched_group_energy),
+                                     GFP_NOWAIT);
+
+                       nstates = (prop->length / sizeof(u32)) / 2;
+                       cap_states = kcalloc(nstates,
+                                            sizeof(struct capacity_state),
+                                            GFP_NOWAIT);
+
+                       for (i = 0, val = prop->value; i < nstates; i++) {
+                               cap_states[i].cap = be32_to_cpup(val++);
+                               cap_states[i].power = be32_to_cpup(val++);
+                       }
+
+                       sge->nr_cap_states = nstates;
+                       sge->cap_states = cap_states;
+
+                       prop = of_find_property(cp, "idle-cost-data", NULL);
+                       if (!prop || !prop->value) {
+                               pr_warn("No idle-cost data, skipping sched_energy init\n");
+                               goto out;
+                       }
+
+                       nstates = (prop->length / sizeof(u32));
+                       idle_states = kcalloc(nstates,
+                                             sizeof(struct idle_state),
+                                             GFP_NOWAIT);
+
+                       for (i = 0, val = prop->value; i < nstates; i++)
+                               idle_states[i].power = be32_to_cpup(val++);
+
+                       sge->nr_idle_states = nstates;
+                       sge->idle_states = idle_states;
+
+                       sge_array[cpu][sd_level] = sge;
+               }
+       }
+
+       pr_info("Sched-energy-costs installed from DT\n");
+       return;
+
+out:
+       free_resources();
+}
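For reference, init_sched_energy_costs() above parses busy-cost-data as <capacity power> u32 pairs and idle-cost-data as one power value per idle state, one phandle per sched-domain level. Assuming a hypothetical CPU with two OPPs and two idle states (all values invented), the parse would populate data roughly like the stand-alone sketch below; the structs are reduced local mirrors of the fields the parser fills in:

#include <stdio.h>

/* Minimal local mirrors of the structures filled in by the DT parse above. */
struct capacity_state { unsigned long cap; unsigned long power; };
struct idle_state { unsigned long power; };
struct sge_example {
	unsigned int nr_idle_states;
	struct idle_state *idle_states;
	unsigned int nr_cap_states;
	struct capacity_state *cap_states;
};

/* busy-cost-data = <512 150>, <1024 450>;   (hypothetical <capacity power> pairs) */
static struct capacity_state example_cap_states[] = {
	{ .cap = 512,  .power = 150 },		/* lower OPP */
	{ .cap = 1024, .power = 450 },		/* highest OPP */
};

/* idle-cost-data = <20>, <5>;               (hypothetical per-idle-state power) */
static struct idle_state example_idle_states[] = {
	{ .power = 20 },			/* shallow (active/WFI) idle */
	{ .power = 5 },				/* deeper idle state */
};

static struct sge_example example_sge = {
	.nr_cap_states	= 2,
	.cap_states	= example_cap_states,
	.nr_idle_states	= 2,
	.idle_states	= example_idle_states,
};

int main(void)
{
	printf("%u cap states, %u idle states\n",
	       example_sge.nr_cap_states, example_sge.nr_idle_states);
	return 0;
}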
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8f258f437ac2c12d566e87aaed751d7d7387fb4e..30d76a18ae1a7fe9c939f4501e9dea65aed3493b 100644 (file)
 #include <linux/mempolicy.h>
 #include <linux/migrate.h>
 #include <linux/task_work.h>
+#include <linux/module.h>
 
 #include <trace/events/sched.h>
 
 #include "sched.h"
+#include "tune.h"
+#include "walt.h"
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
 unsigned int sysctl_sched_latency = 6000000ULL;
 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 
+unsigned int sysctl_sched_is_big_little = 0;
+unsigned int sysctl_sched_sync_hint_enable = 1;
+unsigned int sysctl_sched_initial_task_util = 0;
+unsigned int sysctl_sched_cstate_aware = 1;
+
+#ifdef CONFIG_SCHED_WALT
+unsigned int sysctl_sched_use_walt_cpu_util = 1;
+unsigned int sysctl_sched_use_walt_task_util = 1;
+__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
+    (10 * NSEC_PER_MSEC);
+#endif
 /*
  * The initial- and re-scaling of tunables is configurable
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
@@ -682,7 +696,9 @@ void init_entity_runnable_average(struct sched_entity *se)
        sa->period_contrib = 1023;
        sa->load_avg = scale_load_down(se->load.weight);
        sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-       sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+       sa->util_avg = sched_freq() ?
+               sysctl_sched_initial_task_util :
+               scale_load_down(SCHED_LOAD_SCALE);
        sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
@@ -2600,6 +2616,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 
        scale_freq = arch_scale_freq_capacity(NULL, cpu);
        scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+       trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu);
 
        /* delta_w is the amount already accumulated against our next period */
        delta_w = sa->period_contrib;
@@ -2760,6 +2777,10 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 
        if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
                update_tg_load_avg(cfs_rq, 0);
+
+       if (entity_is_task(se))
+               trace_sched_load_avg_task(task_of(se), &se->avg);
+       trace_sched_load_avg_cpu(cpu, cfs_rq);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2840,27 +2861,45 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
                max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
-{
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       u64 last_update_time;
-
 #ifndef CONFIG_64BIT
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
        u64 last_update_time_copy;
+       u64 last_update_time;
 
        do {
                last_update_time_copy = cfs_rq->load_last_update_time_copy;
                smp_rmb();
                last_update_time = cfs_rq->avg.last_update_time;
        } while (last_update_time != last_update_time_copy);
+
+       return last_update_time;
+}
 #else
-       last_update_time = cfs_rq->avg.last_update_time;
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->avg.last_update_time;
+}
 #endif
 
+/*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+void remove_entity_load_avg(struct sched_entity *se)
+{
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 last_update_time;
+
+       /*
+        * Newly created task or never used group entity should not be removed
+        * from its (source) cfs_rq
+        */
+       if (se->avg.last_update_time == 0)
+               return;
+
+       last_update_time = cfs_rq_last_update_time(cfs_rq);
+
        __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
        atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
        atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
@@ -2962,6 +3001,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                        }
 
                        trace_sched_stat_blocked(tsk, delta);
+                       trace_sched_blocked_reason(tsk);
 
                        /*
                         * Blocking time is in units of nanosecs, so shift by
@@ -4157,6 +4197,28 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
+#ifdef CONFIG_SMP
+static bool cpu_overutilized(int cpu);
+static inline unsigned long boosted_cpu_util(int cpu);
+#else
+#define boosted_cpu_util(cpu) cpu_util(cpu)
+#endif
+
+#ifdef CONFIG_SMP
+static void update_capacity_of(int cpu)
+{
+       unsigned long req_cap;
+
+       if (!sched_freq())
+               return;
+
+       /* Express the boosted utilization as a share of this cpu's capacity. */
+       req_cap = boosted_cpu_util(cpu);
+       req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
+       set_cfs_cpu_capacity(cpu, true, req_cap);
+}
+#endif
+
 /*
  * The enqueue_task method is called before nr_running is
  * increased. Here we update the fair scheduling stats and
@@ -4167,6 +4229,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
+#ifdef CONFIG_SMP
+       int task_new = flags & ENQUEUE_WAKEUP_NEW;
+       int task_wakeup = flags & ENQUEUE_WAKEUP;
+#endif
 
        for_each_sched_entity(se) {
                if (se->on_rq)
@@ -4183,6 +4249,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running++;
+               walt_inc_cfs_cumulative_runnable_avg(cfs_rq, p);
 
                flags = ENQUEUE_WAKEUP;
        }
@@ -4190,6 +4257,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_nr_running++;
+               walt_inc_cfs_cumulative_runnable_avg(cfs_rq, p);
 
                if (cfs_rq_throttled(cfs_rq))
                        break;
@@ -4201,6 +4269,47 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        if (!se)
                add_nr_running(rq, 1);
 
+#ifdef CONFIG_SMP
+
+       /*
+        * Update SchedTune accounting.
+        *
+        * We do it before updating the CPU capacity to ensure the
+        * boost value of the current task is accounted for in the
+        * selection of the OPP.
+        *
+        * We also do it when we enqueue a throttled task; one could argue
+        * that a throttled task should not boost a CPU, however:
+        * a) properly implementing CPU boosting while accounting for
+        *    throttled tasks would add a lot of complexity to the solution
+        * b) it is not easy to quantify the benefits introduced by
+        *    such a more complex solution.
+        * Thus, for the time being, we go for the simple solution and boost
+        * also for throttled RQs.
+        */
+       schedtune_enqueue_task(p, cpu_of(rq));
+
+       if (!se) {
+               walt_inc_cumulative_runnable_avg(rq, p);
+               if (!task_new && !rq->rd->overutilized &&
+                   cpu_overutilized(rq->cpu)) {
+                       rq->rd->overutilized = true;
+                       trace_sched_overutilized(true);
+               }
+
+               /*
+                * We want to potentially trigger a freq switch
+                * request only for tasks that are waking up; this is
+                * because we get here also during load balancing, but
+                * in these cases it seems wise to trigger a single
+                * request after load balancing is done.
+                */
+               if (task_new || task_wakeup)
+                       update_capacity_of(cpu_of(rq));
+       }
+
+#endif /* CONFIG_SMP */
        hrtick_update(rq);
 }
 
@@ -4230,6 +4339,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running--;
+               walt_dec_cfs_cumulative_runnable_avg(cfs_rq, p);
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
@@ -4250,6 +4360,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_nr_running--;
+               walt_dec_cfs_cumulative_runnable_avg(cfs_rq, p);
 
                if (cfs_rq_throttled(cfs_rq))
                        break;
@@ -4261,6 +4372,38 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        if (!se)
                sub_nr_running(rq, 1);
 
+#ifdef CONFIG_SMP
+
+       /*
+        * Update SchedTune accounting
+        *
+        * We do it before updating the CPU capacity to ensure the
+        * boost value of the current task is accounted for in the
+        * selection of the OPP.
+        */
+       schedtune_dequeue_task(p, cpu_of(rq));
+
+       if (!se) {
+               walt_dec_cumulative_runnable_avg(rq, p);
+
+               /*
+                * We want to potentially trigger a freq switch
+                * request only for tasks that are going to sleep;
+                * this is because we get here also during load
+                * balancing, but in these cases it seems wise to
+                * trigger a single request after load balancing is
+                * done.
+                */
+               if (task_sleep) {
+                       if (rq->cfs.nr_running)
+                               update_capacity_of(cpu_of(rq));
+                       else if (sched_freq())
+                               set_cfs_cpu_capacity(cpu_of(rq), false, 0);
+               }
+       }
+
+#endif /* CONFIG_SMP */
+
        hrtick_update(rq);
 }
 
@@ -4487,15 +4630,6 @@ static unsigned long target_load(int cpu, int type)
        return max(rq->cpu_load[type-1], total);
 }
 
-static unsigned long capacity_of(int cpu)
-{
-       return cpu_rq(cpu)->cpu_capacity;
-}
-
-static unsigned long capacity_orig_of(int cpu)
-{
-       return cpu_rq(cpu)->cpu_capacity_orig;
-}
 
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
@@ -4669,6 +4803,392 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 
 #endif
 
+/*
+ * Returns the current capacity of cpu after applying both
+ * cpu and freq scaling.
+ */
+unsigned long capacity_curr_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity_orig *
+              arch_scale_freq_capacity(NULL, cpu)
+              >> SCHED_CAPACITY_SHIFT;
+}
+
+static inline bool energy_aware(void)
+{
+       return sched_feat(ENERGY_AWARE);
+}
+
+struct energy_env {
+       struct sched_group      *sg_top;
+       struct sched_group      *sg_cap;
+       int                     cap_idx;
+       int                     util_delta;
+       int                     src_cpu;
+       int                     dst_cpu;
+       int                     energy;
+       int                     payoff;
+       struct task_struct      *task;
+       struct {
+               int before;
+               int after;
+               int delta;
+               int diff;
+       } nrg;
+       struct {
+               int before;
+               int after;
+               int delta;
+       } cap;
+};
+
+/*
+ * __cpu_norm_util() returns the cpu util relative to a specific capacity,
+ * i.e. its busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful for
+ * energy calculations. Using the scale-invariant util returned by
+ * cpu_util() and approximating scale-invariant util by:
+ *
+ *   util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
+ *
+ * the normalized util can be found using the specific capacity.
+ *
+ *   capacity = capacity_orig * curr_freq/max_freq
+ *
+ *   norm_util = running_time/time ~ util/capacity
+ */
+static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
+{
+       int util = __cpu_util(cpu, delta);
+
+       if (util >= capacity)
+               return SCHED_CAPACITY_SCALE;
+
+       return (util << SCHED_CAPACITY_SHIFT)/capacity;
+}
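As a quick worked case of the normalization above (numbers invented): a CPU with util 256 running at capacity 512 has a busy ratio of 50%, i.e. (256 << 10) / 512 = 512 on the 0..1024 scale, and any util at or above the capacity saturates at full scale. A stand-alone check of that arithmetic:

#include <assert.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

static unsigned long norm_util(unsigned long util, unsigned long capacity)
{
	if (util >= capacity)
		return SCHED_CAPACITY_SCALE;
	return (util << SCHED_CAPACITY_SHIFT) / capacity;
}

int main(void)
{
	assert(norm_util(256, 512) == 512);	/* 50% busy -> 512/1024 */
	assert(norm_util(600, 512) == 1024);	/* saturates at full scale */
	return 0;
}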
+
+static int calc_util_delta(struct energy_env *eenv, int cpu)
+{
+       if (cpu == eenv->src_cpu)
+               return -eenv->util_delta;
+       if (cpu == eenv->dst_cpu)
+               return eenv->util_delta;
+       return 0;
+}
+
+static unsigned long
+group_max_util(struct energy_env *eenv)
+{
+       int i, delta;
+       unsigned long max_util = 0;
+
+       for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
+               delta = calc_util_delta(eenv, i);
+               max_util = max(max_util, __cpu_util(i, delta));
+       }
+
+       return max_util;
+}
+
+/*
+ * group_norm_util() returns the approximated group util relative to its
+ * current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use in
+ * energy calculations. Since task executions may or may not overlap in time in
+ * the group the true normalized util is between max(cpu_norm_util(i)) and
+ * sum(cpu_norm_util(i)) when iterating over all cpus in the group, i. The
+ * latter is used as the estimate as it leads to a more pessimistic energy
+ * estimate (more busy).
+ */
+static unsigned long
+group_norm_util(struct energy_env *eenv, struct sched_group *sg)
+{
+       int i, delta;
+       unsigned long util_sum = 0;
+       unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
+
+       for_each_cpu(i, sched_group_cpus(sg)) {
+               delta = calc_util_delta(eenv, i);
+               util_sum += __cpu_norm_util(i, capacity, delta);
+       }
+
+       if (util_sum > SCHED_CAPACITY_SCALE)
+               return SCHED_CAPACITY_SCALE;
+       return util_sum;
+}
+
+static int find_new_capacity(struct energy_env *eenv,
+       const struct sched_group_energy * const sge)
+{
+       int idx;
+       unsigned long util = group_max_util(eenv);
+
+       for (idx = 0; idx < sge->nr_cap_states; idx++) {
+               if (sge->cap_states[idx].cap >= util)
+                       break;
+       }
+
+       eenv->cap_idx = idx;
+
+       return idx;
+}
+
+static int group_idle_state(struct sched_group *sg)
+{
+       int i, state = INT_MAX;
+
+       /* Find the shallowest idle state in the sched group. */
+       for_each_cpu(i, sched_group_cpus(sg))
+               state = min(state, idle_get_state_idx(cpu_rq(i)));
+
+       /* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
+       state++;
+
+       return state;
+}
+
+/*
+ * sched_group_energy(): Computes the absolute energy consumption of cpus
+ * belonging to the sched_group including shared resources shared only by
+ * members of the group. Iterates over all cpus in the hierarchy below the
+ * sched_group starting from the bottom working its way up before going to
+ * the next cpu until all cpus are covered at all levels. The current
+ * implementation is likely to gather the same util statistics multiple times.
+ * This can probably be done in a faster but more complex way.
+ * Note: sched_group_energy() may fail when racing with sched_domain updates.
+ */
+static int sched_group_energy(struct energy_env *eenv)
+{
+       struct sched_domain *sd;
+       int cpu, total_energy = 0;
+       struct cpumask visit_cpus;
+       struct sched_group *sg;
+
+       WARN_ON(!eenv->sg_top->sge);
+
+       cpumask_copy(&visit_cpus, sched_group_cpus(eenv->sg_top));
+
+       while (!cpumask_empty(&visit_cpus)) {
+               struct sched_group *sg_shared_cap = NULL;
+
+               cpu = cpumask_first(&visit_cpus);
+
+               /*
+                * Is the group utilization affected by cpus outside this
+                * sched_group?
+                */
+               sd = rcu_dereference(per_cpu(sd_scs, cpu));
+
+               if (!sd)
+                       /*
+                        * We most probably raced with hotplug; returning a
+                        * wrong energy estimation is better than entering an
+                        * infinite loop.
+                        */
+                       return -EINVAL;
+
+               if (sd->parent)
+                       sg_shared_cap = sd->parent->groups;
+
+               for_each_domain(cpu, sd) {
+                       sg = sd->groups;
+
+                       /* Has this sched_domain already been visited? */
+                       if (sd->child && group_first_cpu(sg) != cpu)
+                               break;
+
+                       do {
+                               unsigned long group_util;
+                               int sg_busy_energy, sg_idle_energy;
+                               int cap_idx, idle_idx;
+
+                               if (sg_shared_cap && sg_shared_cap->group_weight >= sg->group_weight)
+                                       eenv->sg_cap = sg_shared_cap;
+                               else
+                                       eenv->sg_cap = sg;
+
+                               cap_idx = find_new_capacity(eenv, sg->sge);
+
+                               if (sg->group_weight == 1) {
+                                       /* Remove capacity of src CPU (before task move) */
+                                       if (eenv->util_delta == 0 &&
+                                           cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
+                                               eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
+                                               eenv->cap.delta -= eenv->cap.before;
+                                       }
+                                       /* Add capacity of dst CPU  (after task move) */
+                                       if (eenv->util_delta != 0 &&
+                                           cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
+                                               eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
+                                               eenv->cap.delta += eenv->cap.after;
+                                       }
+                               }
+
+                               idle_idx = group_idle_state(sg);
+                               group_util = group_norm_util(eenv, sg);
+                               sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
+                                                               >> SCHED_CAPACITY_SHIFT;
+                               sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
+                                                               * sg->sge->idle_states[idle_idx].power)
+                                                               >> SCHED_CAPACITY_SHIFT;
+
+                               total_energy += sg_busy_energy + sg_idle_energy;
+
+                               if (!sd->child)
+                                       cpumask_xor(&visit_cpus, &visit_cpus, sched_group_cpus(sg));
+
+                               if (cpumask_equal(sched_group_cpus(sg), sched_group_cpus(eenv->sg_top)))
+                                       goto next_cpu;
+
+                       } while (sg = sg->next, sg != sd->groups);
+               }
+next_cpu:
+               cpumask_clear_cpu(cpu, &visit_cpus);
+               continue;
+       }
+
+       eenv->energy = total_energy;
+       return 0;
+}
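For each group visited above, busy energy is charged in proportion to the normalized group utilization and idle energy for the remaining fraction, both at the selected capacity/idle state. A stand-alone sketch of that per-group estimate with invented utilization and power numbers:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_LOAD_SCALE	(1 << SCHED_CAPACITY_SHIFT)

/* Per-group estimate as in sched_group_energy(): busy + idle contributions. */
static int group_energy(unsigned long group_util,	/* normalized, 0..1024 */
			unsigned long busy_power,	/* power at the cap state */
			unsigned long idle_power)	/* power in the idle state */
{
	int sg_busy_energy = (group_util * busy_power) >> SCHED_CAPACITY_SHIFT;
	int sg_idle_energy = ((SCHED_LOAD_SCALE - group_util) * idle_power)
						>> SCHED_CAPACITY_SHIFT;

	return sg_busy_energy + sg_idle_energy;
}

int main(void)
{
	/* e.g. 25% busy at a 400-power OPP with a 20-power idle state: 100 + 15. */
	printf("%d\n", group_energy(256, 400, 20));
	return 0;
}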
+
+static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
+{
+       return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
+}
+
+/*
+ * energy_diff(): Estimate the energy impact of changing the utilization
+ * distribution. eenv specifies the change: utilisation amount, source, and
+ * destination cpu. Source or destination cpu may be -1 in which case the
+ * utilization is removed from or added to the system (e.g. task wake-up). If
+ * both are specified, the utilization is migrated.
+ */
+static inline int __energy_diff(struct energy_env *eenv)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg;
+       int sd_cpu = -1, energy_before = 0, energy_after = 0;
+
+       struct energy_env eenv_before = {
+               .util_delta     = 0,
+               .src_cpu        = eenv->src_cpu,
+               .dst_cpu        = eenv->dst_cpu,
+               .nrg            = { 0, 0, 0, 0},
+               .cap            = { 0, 0, 0 },
+       };
+
+       if (eenv->src_cpu == eenv->dst_cpu)
+               return 0;
+
+       sd_cpu = (eenv->src_cpu != -1) ? eenv->src_cpu : eenv->dst_cpu;
+       sd = rcu_dereference(per_cpu(sd_ea, sd_cpu));
+
+       if (!sd)
+               return 0; /* Error */
+
+       sg = sd->groups;
+
+       do {
+               if (cpu_in_sg(sg, eenv->src_cpu) || cpu_in_sg(sg, eenv->dst_cpu)) {
+                       eenv_before.sg_top = eenv->sg_top = sg;
+
+                       if (sched_group_energy(&eenv_before))
+                               return 0; /* Invalid result abort */
+                       energy_before += eenv_before.energy;
+
+                       /* Keep track of SRC cpu (before) capacity */
+                       eenv->cap.before = eenv_before.cap.before;
+                       eenv->cap.delta = eenv_before.cap.delta;
+
+                       if (sched_group_energy(eenv))
+                               return 0; /* Invalid result abort */
+                       energy_after += eenv->energy;
+               }
+       } while (sg = sg->next, sg != sd->groups);
+
+       eenv->nrg.before = energy_before;
+       eenv->nrg.after = energy_after;
+       eenv->nrg.diff = eenv->nrg.after - eenv->nrg.before;
+       eenv->payoff = 0;
+
+       trace_sched_energy_diff(eenv->task,
+                       eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
+                       eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
+                       eenv->cap.before, eenv->cap.after, eenv->cap.delta,
+                       eenv->nrg.delta, eenv->payoff);
+
+       return eenv->nrg.diff;
+}
+
+#ifdef CONFIG_SCHED_TUNE
+
+struct target_nrg schedtune_target_nrg;
+
+/*
+ * System energy normalization
+ * Returns the normalized value, in the range [0..SCHED_LOAD_SCALE],
+ * corresponding to the specified energy variation.
+ */
+static inline int
+normalize_energy(int energy_diff)
+{
+       u32 normalized_nrg;
+#ifdef CONFIG_SCHED_DEBUG
+       int max_delta;
+
+       /* Check for boundaries */
+       max_delta  = schedtune_target_nrg.max_power;
+       max_delta -= schedtune_target_nrg.min_power;
+       WARN_ON(abs(energy_diff) >= max_delta);
+#endif
+
+       /* Do scaling using positive numbers to increase the range */
+       normalized_nrg = (energy_diff < 0) ? -energy_diff : energy_diff;
+
+       /* Scale by energy magnitude */
+       normalized_nrg <<= SCHED_LOAD_SHIFT;
+
+       /* Normalize on max energy for target platform */
+       normalized_nrg = reciprocal_divide(
+                       normalized_nrg, schedtune_target_nrg.rdiv);
+
+       return (energy_diff < 0) ? -normalized_nrg : normalized_nrg;
+}
+
+static inline int
+energy_diff(struct energy_env *eenv)
+{
+       int boost = schedtune_task_boost(eenv->task);
+       int nrg_delta;
+
+       /* Compute "absolute" energy diff */
+       __energy_diff(eenv);
+
+       /* Return energy diff when boost margin is 0 */
+       if (boost == 0)
+               return eenv->nrg.diff;
+
+       /* Compute normalized energy diff */
+       nrg_delta = normalize_energy(eenv->nrg.diff);
+       eenv->nrg.delta = nrg_delta;
+
+       eenv->payoff = schedtune_accept_deltas(
+                       eenv->nrg.delta,
+                       eenv->cap.delta,
+                       eenv->task);
+
+       /*
+        * When SchedTune is enabled, the energy_diff() function will return
+        * the computed energy payoff value. Since callers of energy_diff()
+        * expect a negative return value, the evaluation above returns a
+        * negative value whenever the computed payoff is positive, which is
+        * the condition for accepting a scheduling decision.
+        */
+       return -eenv->payoff;
+}
+#else /* CONFIG_SCHED_TUNE */
+#define energy_diff(eenv) __energy_diff(eenv)
+#endif
+
 /*
  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
  * A waker of many should wake a different task than the one last awakened
@@ -4760,6 +5280,160 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
        return 1;
 }
 
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+       if (!walt_disabled && sysctl_sched_use_walt_task_util) {
+               unsigned long demand = p->ravg.demand;
+               return (demand << 10) / walt_ravg_window;
+       }
+#endif
+       return p->se.avg.util_avg;
+}
+
+unsigned int capacity_margin = 1280; /* ~20% margin */
+
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
+static inline bool __task_fits(struct task_struct *p, int cpu, int util)
+{
+       unsigned long capacity = capacity_of(cpu);
+
+       util += boosted_task_util(p);
+
+       return (capacity * 1024) > (util * capacity_margin);
+}
+
+static inline bool task_fits_max(struct task_struct *p, int cpu)
+{
+       unsigned long capacity = capacity_of(cpu);
+       unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+       if (capacity == max_capacity)
+               return true;
+
+       if (capacity * capacity_margin > max_capacity * 1024)
+               return true;
+
+       return __task_fits(p, cpu, 0);
+}
+
+static inline bool task_fits_spare(struct task_struct *p, int cpu)
+{
+       return __task_fits(p, cpu, cpu_util(cpu));
+}
+
+static bool cpu_overutilized(int cpu)
+{
+       return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+}
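A quick sanity check of the ~20% headroom encoded by capacity_margin = 1280: __task_fits() accepts a CPU while the boosted utilization stays below capacity * 1024 / 1280, i.e. 80% of that CPU's capacity, and cpu_overutilized() trips past the same threshold. The capacity figure of 430 below is a made-up "little" CPU value used purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define EX_CAPACITY_MARGIN 1280         /* ~20% headroom, as in the patch */

/* Same comparison used by __task_fits() and, inverted, by cpu_overutilized(). */
static bool fits(unsigned long capacity, unsigned long util)
{
        return capacity * 1024 > util * EX_CAPACITY_MARGIN;
}

int main(void)
{
        /* Invented little CPU: capacity 430 -> threshold 430*1024/1280 = 344 */
        printf("%d\n", fits(430, 300));         /* 1: still fits     */
        printf("%d\n", fits(430, 400));         /* 0: over-utilized  */
        return 0;
}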
+
+#ifdef CONFIG_SCHED_TUNE
+
+static long
+schedtune_margin(unsigned long signal, long boost)
+{
+       long long margin = 0;
+
+       /*
+        * Signal proportional compensation (SPC)
+        *
+        * The Boost (B) value is used to compute a Margin (M) which is
+        * proportional to the complement of the original Signal (S):
+        *   M = B * (SCHED_LOAD_SCALE - S), if B is positive
+        *   M = B * S, if B is negative
+        * The obtained M could be used by the caller to "boost" S.
+        */
+       if (boost >= 0) {
+               margin  = SCHED_LOAD_SCALE - signal;
+               margin *= boost;
+       } else
+               margin = -signal * boost;
+       /*
+        * Fast integer division by constant:
+        *  Constant   :                 (C) = 100
+        *  Precision  : 0.1%            (P) = 0.1
+        *  Reference  : C * 100 / P     (R) = 100000
+        *
+        * Thus:
+        *  Shift bits : ceil(log(R,2))  (S) = 17
+        *  Mult const : round(2^S/C)    (M) = 1311
+        *
+        */
+       margin  *= 1311;
+       margin >>= 17;
+
+       if (boost < 0)
+               margin *= -1;
+       return margin;
+}
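To make the fixed-point divide-by-100 above concrete (multiply by 1311, shift right by 17): with a boost of 10 and a signal of 512, the margin comes out to roughly 10% of the signal's remaining headroom. The sketch below reproduces the arithmetic standalone; the input values are illustrative only:

#include <stdio.h>

#define EX_SCHED_LOAD_SCALE 1024L

/* Same SPC arithmetic as schedtune_margin(); values are illustrative. */
static long spc_margin(long signal, long boost)
{
        long long margin;

        if (boost >= 0)
                margin = (EX_SCHED_LOAD_SCALE - signal) * boost;
        else
                margin = -signal * boost;

        margin *= 1311;                 /* ~= margin / 100, 0.1% precision */
        margin >>= 17;

        return (boost < 0) ? -margin : margin;
}

int main(void)
{
        /* boost = 10, signal = 512: 10 * (1024 - 512) / 100 = 51 */
        printf("%ld\n", spc_margin(512, 10));   /* 51  */
        printf("%ld\n", spc_margin(512, -10));  /* -51 */
        return 0;
}

The multiply-and-shift keeps the percentage scaling branch-free and avoids a division on the wakeup path, at the stated 0.1% precision.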
+
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+       int boost = schedtune_cpu_boost(cpu);
+
+       if (boost == 0)
+               return 0;
+
+       return schedtune_margin(util, boost);
+}
+
+static inline long
+schedtune_task_margin(struct task_struct *task)
+{
+       int boost = schedtune_task_boost(task);
+       unsigned long util;
+       long margin;
+
+       if (boost == 0)
+               return 0;
+
+       util = task_util(task);
+       margin = schedtune_margin(util, boost);
+
+       return margin;
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+static inline int
+schedtune_cpu_margin(unsigned long util, int cpu)
+{
+       return 0;
+}
+
+static inline int
+schedtune_task_margin(struct task_struct *task)
+{
+       return 0;
+}
+
+#endif /* CONFIG_SCHED_TUNE */
+
+static inline unsigned long
+boosted_cpu_util(int cpu)
+{
+       unsigned long util = cpu_util(cpu);
+       long margin = schedtune_cpu_margin(util, cpu);
+
+       trace_sched_boost_cpu(cpu, util, margin);
+
+       return util + margin;
+}
+
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+       unsigned long util = task_util(task);
+       long margin = schedtune_task_margin(task);
+
+       trace_sched_boost_task(task, util, margin);
+
+       return util + margin;
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
  * domain.
@@ -4769,7 +5443,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int sd_flag)
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
+       struct sched_group *fit_group = NULL, *spare_group = NULL;
        unsigned long min_load = ULONG_MAX, this_load = 0;
+       unsigned long fit_capacity = ULONG_MAX;
+       unsigned long max_spare_capacity = capacity_margin - SCHED_LOAD_SCALE;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -4777,7 +5454,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                load_idx = sd->wake_idx;
 
        do {
-               unsigned long load, avg_load;
+               unsigned long load, avg_load, spare_capacity;
                int local_group;
                int i;
 
@@ -4800,6 +5477,25 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                                load = target_load(i, load_idx);
 
                        avg_load += load;
+
+                       /*
+                        * Look for the most energy-efficient group that can
+                        * fit the task.
+                        */
+                       if (capacity_of(i) < fit_capacity && task_fits_spare(p, i)) {
+                               fit_capacity = capacity_of(i);
+                               fit_group = group;
+                       }
+
+                       /*
+                        * Look for group which has most spare capacity on a
+                        * single cpu.
+                        */
+                       spare_capacity = capacity_of(i) - cpu_util(i);
+                       if (spare_capacity > max_spare_capacity) {
+                               max_spare_capacity = spare_capacity;
+                               spare_group = group;
+                       }
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -4813,6 +5509,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                }
        } while (group = group->next, group != sd->groups);
 
+       if (fit_group)
+               return fit_group;
+
+       if (spare_group)
+               return spare_group;
+
        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;
@@ -4833,7 +5535,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-               if (idle_cpu(i)) {
+               if (task_fits_spare(p, i)) {
                        struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
                        if (idle && idle->exit_latency < min_exit_latency) {
@@ -4845,7 +5547,8 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                                min_exit_latency = idle->exit_latency;
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
-                       } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+                       } else if (idle_cpu(i) &&
+                                  (!idle || idle->exit_latency == min_exit_latency) &&
                                   rq->idle_stamp > latest_idle_timestamp) {
                                /*
                                 * If equal or no active idle state, then
@@ -4854,6 +5557,13 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                                 */
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
+                       } else if (shallowest_idle_cpu == -1) {
+                               /*
+                                * If we haven't found an idle CPU yet
+                                * pick a non-idle one that can fit the task as
+                                * fallback.
+                                */
+                               shallowest_idle_cpu = i;
                        }
                } else if (shallowest_idle_cpu == -1) {
                        load = weighted_cpuload(i);
@@ -4875,15 +5585,20 @@ static int select_idle_sibling(struct task_struct *p, int target)
        struct sched_domain *sd;
        struct sched_group *sg;
        int i = task_cpu(p);
+       int best_idle = -1;
+       int best_idle_cstate = -1;
+       int best_idle_capacity = INT_MAX;
 
-       if (idle_cpu(target))
-               return target;
+       if (!sysctl_sched_cstate_aware) {
+               if (idle_cpu(target))
+                       return target;
 
-       /*
-        * If the prevous cpu is cache affine and idle, don't be stupid.
-        */
-       if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
-               return i;
+               /*
+                * If the previous cpu is cache affine and idle, don't be stupid.
+                */
+               if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+                       return i;
+       }
 
        /*
         * Otherwise, iterate the domains and find an eligible idle cpu.
@@ -4896,54 +5611,262 @@ static int select_idle_sibling(struct task_struct *p, int target)
                                                tsk_cpus_allowed(p)))
                                goto next;
 
-                       for_each_cpu(i, sched_group_cpus(sg)) {
-                               if (i == target || !idle_cpu(i))
-                                       goto next;
-                       }
+                       if (sysctl_sched_cstate_aware) {
+                               for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
+                                       struct rq *rq = cpu_rq(i);
+                                       int idle_idx = idle_get_state_idx(rq);
+                                       unsigned long new_usage = boosted_task_util(p);
+                                       unsigned long capacity_orig = capacity_orig_of(i);
+                                       if (new_usage > capacity_orig || !idle_cpu(i))
+                                               goto next;
+
+                                       if (i == target && new_usage <= capacity_curr_of(target))
+                                               return target;
+
+                                       if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
+                                               best_idle = i;
+                                               best_idle_cstate = idle_idx;
+                                               best_idle_capacity = capacity_orig;
+                                       }
+                               }
+                       } else {
+                               for_each_cpu(i, sched_group_cpus(sg)) {
+                                       if (i == target || !idle_cpu(i))
+                                               goto next;
+                               }
 
-                       target = cpumask_first_and(sched_group_cpus(sg),
+                               target = cpumask_first_and(sched_group_cpus(sg),
                                        tsk_cpus_allowed(p));
-                       goto done;
+                               goto done;
+                       }
 next:
                        sg = sg->next;
                } while (sg != sd->groups);
        }
+       if (best_idle >= 0)
+               target = best_idle;
+
 done:
        return target;
 }
 
-/*
- * cpu_util returns the amount of capacity of a CPU that is used by CFS
- * tasks. The unit of the return value must be the one of capacity so we can
- * compare the utilization with the capacity of the CPU that is available for
- * CFS task (ie cpu_capacity).
- *
- * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
- * recent utilization of currently non-runnable tasks on a CPU. It represents
- * the amount of utilization of a CPU in the range [0..capacity_orig] where
- * capacity_orig is the cpu_capacity available at the highest frequency
- * (arch_scale_freq_capacity()).
- * The utilization of a CPU converges towards a sum equal to or less than the
- * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
- * the running time on this CPU scaled by capacity_curr.
- *
- * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
- * higher than capacity_orig because of unfortunate rounding in
- * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
- * the average stabilizes with the new running time. We need to check that the
- * utilization stays within the range of [0..capacity_orig] and cap it if
- * necessary. Without utilization capping, a group could be seen as overloaded
- * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
- * available capacity. We allow utilization to overshoot capacity_curr (but not
- * capacity_orig) as it useful for predicting the capacity required after task
- * migrations (scheduler-driven DVFS).
- */
-static int cpu_util(int cpu)
+static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
 {
-       unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
-       unsigned long capacity = capacity_orig_of(cpu);
+       int iter_cpu;
+       int target_cpu = -1;
+       int target_util = 0;
+       int backup_capacity = 0;
+       int best_idle_cpu = -1;
+       int best_idle_cstate = INT_MAX;
+       int backup_cpu = -1;
+       unsigned long task_util_boosted, new_util;
+
+       task_util_boosted = boosted_task_util(p);
+       for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
+               int cur_capacity;
+               struct rq *rq;
+               int idle_idx;
+
+               /*
+                * Iterate from higher cpus for boosted tasks.
+                */
+               int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
+
+               if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
+                       continue;
+
+               /*
+                * p's blocked utilization is still accounted for on prev_cpu
+                * so prev_cpu will receive a negative bias due to the double
+                * accounting. However, the blocked utilization may be zero.
+                */
+               new_util = cpu_util(i) + task_util_boosted;
+
+               /*
+                * Ensure minimum capacity to grant the required boost.
+                * The target CPU can be already at a capacity level higher
+                * than the one required to boost the task.
+                */
+               if (new_util > capacity_orig_of(i))
+                       continue;
 
-       return (util >= capacity) ? capacity : util;
+#ifdef CONFIG_SCHED_WALT
+               if (walt_cpu_high_irqload(i))
+                       continue;
+#endif
+               /*
+                * Unconditionally favor tasks that prefer idle cpus, to
+                * improve latency.
+                */
+               if (idle_cpu(i) && prefer_idle) {
+                       if (best_idle_cpu < 0)
+                               best_idle_cpu = i;
+                       continue;
+               }
+
+               cur_capacity = capacity_curr_of(i);
+               rq = cpu_rq(i);
+               idle_idx = idle_get_state_idx(rq);
+
+               if (new_util < cur_capacity) {
+                       if (cpu_rq(i)->nr_running) {
+                               if (prefer_idle) {
+                                       /* Find a target cpu with highest
+                                        * utilization.
+                                        */
+                                       if (target_util == 0 ||
+                                               target_util < new_util) {
+                                               target_cpu = i;
+                                               target_util = new_util;
+                                       }
+                               } else {
+                                       /* Find a target cpu with lowest
+                                        * utilization.
+                                        */
+                                       if (target_util == 0 ||
+                                               target_util > new_util) {
+                                               target_cpu = i;
+                                               target_util = new_util;
+                                       }
+                               }
+                       } else if (!prefer_idle) {
+                               if (best_idle_cpu < 0 ||
+                                       (sysctl_sched_cstate_aware &&
+                                               best_idle_cstate > idle_idx)) {
+                                       best_idle_cstate = idle_idx;
+                                       best_idle_cpu = i;
+                               }
+                       }
+               } else if (backup_capacity == 0 ||
+                               backup_capacity > cur_capacity) {
+                       /* Find a backup cpu with least capacity. */
+                       backup_capacity = cur_capacity;
+                       backup_cpu = i;
+               }
+       }
+
+       if (prefer_idle && best_idle_cpu >= 0)
+               target_cpu = best_idle_cpu;
+       else if (target_cpu < 0)
+               target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
+
+       return target_cpu;
+}
+
+static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
+{
+       struct sched_domain *sd;
+       struct sched_group *sg, *sg_target;
+       int target_max_cap = INT_MAX;
+       int target_cpu = task_cpu(p);
+       unsigned long task_util_boosted, new_util;
+       int i;
+
+       if (sysctl_sched_sync_hint_enable && sync) {
+               int cpu = smp_processor_id();
+               cpumask_t search_cpus;
+               cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+               if (cpumask_test_cpu(cpu, &search_cpus))
+                       return cpu;
+       }
+
+       sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
+
+       if (!sd)
+               return target;
+
+       sg = sd->groups;
+       sg_target = sg;
+
+       if (sysctl_sched_is_big_little) {
+
+               /*
+                * Find group with sufficient capacity. We only get here if no cpu is
+                * overutilized. We may end up overutilizing a cpu by adding the task,
+                * but that should not be any worse than select_idle_sibling().
+                * load_balance() should sort it out later as we get above the tipping
+                * point.
+                */
+               do {
+                       /* Assuming all cpus are the same in group */
+                       int max_cap_cpu = group_first_cpu(sg);
+
+                       /*
+                        * Assume smaller max capacity means more energy-efficient.
+                        * Ideally we should query the energy model for the right
+                        * answer but it easily ends up in an exhaustive search.
+                        */
+                       if (capacity_of(max_cap_cpu) < target_max_cap &&
+                           task_fits_max(p, max_cap_cpu)) {
+                               sg_target = sg;
+                               target_max_cap = capacity_of(max_cap_cpu);
+                       }
+               } while (sg = sg->next, sg != sd->groups);
+
+               task_util_boosted = boosted_task_util(p);
+               /* Find cpu with sufficient capacity */
+               for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
+                       /*
+                        * p's blocked utilization is still accounted for on prev_cpu
+                        * so prev_cpu will receive a negative bias due to the double
+                        * accounting. However, the blocked utilization may be zero.
+                        */
+                       new_util = cpu_util(i) + task_util_boosted;
+
+                       /*
+                        * Ensure minimum capacity to grant the required boost.
+                        * The target CPU can be already at a capacity level higher
+                        * than the one required to boost the task.
+                        */
+                       if (new_util > capacity_orig_of(i))
+                               continue;
+
+                       if (new_util < capacity_curr_of(i)) {
+                               target_cpu = i;
+                               if (cpu_rq(i)->nr_running)
+                                       break;
+                       }
+
+                       /* cpu has capacity at higher OPP, keep it as fallback */
+                       if (target_cpu == task_cpu(p))
+                               target_cpu = i;
+               }
+       } else {
+               /*
+                * Find a cpu with sufficient capacity
+                */
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+               bool boosted = schedtune_task_boost(p) > 0;
+               bool prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+               bool boosted = 0;
+               bool prefer_idle = 0;
+#endif
+               int tmp_target = find_best_target(p, boosted, prefer_idle);
+               if (tmp_target >= 0) {
+                       target_cpu = tmp_target;
+                       if ((boosted || prefer_idle) && idle_cpu(target_cpu))
+                               return target_cpu;
+               }
+       }
+
+       if (target_cpu != task_cpu(p)) {
+               struct energy_env eenv = {
+                       .util_delta     = task_util(p),
+                       .src_cpu        = task_cpu(p),
+                       .dst_cpu        = target_cpu,
+                       .task           = p,
+               };
+
+               /* Not enough spare capacity on previous cpu */
+               if (cpu_overutilized(task_cpu(p)))
+                       return target_cpu;
+
+               if (energy_diff(&eenv) >= 0)
+                       return task_cpu(p);
+       }
+
+       return target_cpu;
 }
 
 /*
@@ -4968,7 +5891,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        int sync = wake_flags & WF_SYNC;
 
        if (sd_flag & SD_BALANCE_WAKE)
-               want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+               want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
+                             cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
+                             energy_aware();
 
        rcu_read_lock();
        for_each_domain(cpu, tmp) {
@@ -4998,7 +5923,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        }
 
        if (!sd) {
-               if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+               if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
+                       new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
+               else if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
                        new_cpu = select_idle_sibling(p, new_cpu);
 
        } else while (sd) {
@@ -5068,6 +5995,8 @@ static void task_dead_fair(struct task_struct *p)
 {
        remove_entity_load_avg(&p->se);
 }
+#else
+#define task_fits_max(p, cpu) true
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5314,6 +6243,8 @@ again:
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
 
+       rq->misfit_task = !task_fits_max(p, rq->cpu);
+
        return p;
 simple:
        cfs_rq = &rq->cfs;
@@ -5335,9 +6266,12 @@ simple:
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
 
+       rq->misfit_task = !task_fits_max(p, rq->cpu);
+
        return p;
 
 idle:
+       rq->misfit_task = 0;
        /*
         * This is OK, because current is on_cpu, which avoids it being picked
         * for load-balance and preemption/IRQs are still disabled avoiding
@@ -5550,6 +6484,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
 
 enum fbq_type { regular, remote, all };
 
+enum group_type {
+       group_other = 0,
+       group_misfit_task,
+       group_imbalanced,
+       group_overloaded,
+};
+
 #define LBF_ALL_PINNED 0x01
 #define LBF_NEED_BREAK 0x02
 #define LBF_DST_PINNED  0x04
@@ -5568,6 +6509,7 @@ struct lb_env {
        int                     new_dst_cpu;
        enum cpu_idle_type      idle;
        long                    imbalance;
+       unsigned int            src_grp_nr_running;
        /* The set of CPUs under consideration for load-balancing */
        struct cpumask          *cpus;
 
@@ -5578,6 +6520,7 @@ struct lb_env {
        unsigned int            loop_max;
 
        enum fbq_type           fbq_type;
+       enum group_type         busiest_group_type;
        struct list_head        tasks;
 };
 
@@ -5759,7 +6702,9 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 
        deactivate_task(env->src_rq, p, 0);
        p->on_rq = TASK_ON_RQ_MIGRATING;
+       double_lock_balance(env->src_rq, env->dst_rq);
        set_task_cpu(p, env->dst_cpu);
+       double_unlock_balance(env->src_rq, env->dst_rq);
 }
 
 /*
@@ -5904,6 +6849,10 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
 {
        raw_spin_lock(&rq->lock);
        attach_task(rq, p);
+       /*
+        * We want to potentially raise target_cpu's OPP.
+        */
+       update_capacity_of(cpu_of(rq));
        raw_spin_unlock(&rq->lock);
 }
 
@@ -5925,6 +6874,11 @@ static void attach_tasks(struct lb_env *env)
                attach_task(env->dst_rq, p);
        }
 
+       /*
+        * We want to potentially raise env.dst_cpu's OPP.
+        */
+       update_capacity_of(env->dst_cpu);
+
        raw_spin_unlock(&env->dst_rq->lock);
 }
 
@@ -6020,12 +6974,6 @@ static unsigned long task_h_load(struct task_struct *p)
 
 /********** Helpers for find_busiest_group ************************/
 
-enum group_type {
-       group_other = 0,
-       group_imbalanced,
-       group_overloaded,
-};
-
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
@@ -6041,6 +6989,7 @@ struct sg_lb_stats {
        unsigned int group_weight;
        enum group_type group_type;
        int group_no_capacity;
+       int group_misfit_task; /* A cpu has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@ -6132,19 +7081,58 @@ static unsigned long scale_rt_capacity(int cpu)
 
        used = div_u64(avg, total);
 
+       /*
+        * Deadline bandwidth is defined at the system level, so we must
+        * weight this bandwidth by the max capacity of the system.
+        * As a reminder, avg_bw is 20 bits wide and
+        * scale_cpu_capacity is 10 bits wide.
+        */
+       used += div_u64(rq->dl.avg_bw, arch_scale_cpu_capacity(NULL, cpu));
+
        if (likely(used < SCHED_CAPACITY_SCALE))
                return SCHED_CAPACITY_SCALE - used;
 
        return 1;
 }
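The deadline term added above can be sanity-checked by hand: dl.avg_bw is a <<20 fixed-point fraction (the same scale to_ratio() produces), so dividing it by the CPU's arch capacity lands back on the 10-bit capacity scale. The reservation size and CPU capacities below are made-up figures:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Invented figure: a 25% deadline reservation expressed as the
         * <<20 fixed-point fraction produced by to_ratio(). */
        uint64_t avg_bw = (1ULL << 20) / 4;             /* 262144 */

        /* Dividing by the arch cpu capacity (1024 for the biggest CPU,
         * 430 for an invented little CPU) yields a capacity-scale value. */
        uint64_t used_big    = avg_bw / 1024;           /* 256, ~25% of scale  */
        uint64_t used_little = avg_bw / 430;            /* 609, a larger share */

        printf("%llu %llu\n", (unsigned long long)used_big,
               (unsigned long long)used_little);
        return 0;
}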
 
+void init_max_cpu_capacity(struct max_cpu_capacity *mcc)
+{
+       raw_spin_lock_init(&mcc->lock);
+       mcc->val = 0;
+       mcc->cpu = -1;
+}
+
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
        unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;
+       struct max_cpu_capacity *mcc;
+       unsigned long max_capacity;
+       int max_cap_cpu;
+       unsigned long flags;
 
        cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
+       mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
+
+       raw_spin_lock_irqsave(&mcc->lock, flags);
+       max_capacity = mcc->val;
+       max_cap_cpu = mcc->cpu;
+
+       if ((max_capacity > capacity && max_cap_cpu == cpu) ||
+           (max_capacity < capacity)) {
+               mcc->val = capacity;
+               mcc->cpu = cpu;
+#ifdef CONFIG_SCHED_DEBUG
+               raw_spin_unlock_irqrestore(&mcc->lock, flags);
+               printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
+                               cpu, capacity);
+               goto skip_unlock;
+#endif
+       }
+       raw_spin_unlock_irqrestore(&mcc->lock, flags);
+
+skip_unlock: __attribute__ ((unused));
        capacity *= scale_rt_capacity(cpu);
        capacity >>= SCHED_CAPACITY_SHIFT;
 
@@ -6153,13 +7141,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
+       sdg->sgc->max_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long capacity;
+       unsigned long capacity, max_capacity;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
@@ -6172,6 +7161,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
        }
 
        capacity = 0;
+       max_capacity = 0;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -6196,11 +7186,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                         */
                        if (unlikely(!rq->sd)) {
                                capacity += capacity_of(cpu);
-                               continue;
+                       } else {
+                               sgc = rq->sd->groups->sgc;
+                               capacity += sgc->capacity;
                        }
 
-                       sgc = rq->sd->groups->sgc;
-                       capacity += sgc->capacity;
+                       max_capacity = max(capacity, max_capacity);
                }
        } else  {
                /*
@@ -6210,12 +7201,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
                group = child->groups;
                do {
-                       capacity += group->sgc->capacity;
+                       struct sched_group_capacity *sgc = group->sgc;
+
+                       capacity += sgc->capacity;
+                       max_capacity = max(sgc->max_capacity, max_capacity);
                        group = group->next;
                } while (group != child->groups);
        }
 
        sdg->sgc->capacity = capacity;
+       sdg->sgc->max_capacity = max_capacity;
 }
 
 /*
@@ -6310,6 +7305,18 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
        return false;
 }
 
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+       return sg->sgc->max_capacity + capacity_margin - SCHED_LOAD_SCALE <
+                                                       ref->sgc->max_capacity;
+}
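group_smaller_cpu_capacity() only reports a group as smaller when the gap exceeds the slack of capacity_margin - SCHED_LOAD_SCALE = 256 capacity units, which filters out groups whose capacities differ only marginally. A small illustrative check (the 430 and 900 capacities are invented):

#include <stdbool.h>
#include <stdio.h>

#define EX_SLACK (1280 - 1024)          /* capacity_margin - SCHED_LOAD_SCALE */

/* Same comparison as group_smaller_cpu_capacity(), on bare numbers. */
static bool group_smaller(unsigned long sg_max, unsigned long ref_max)
{
        return sg_max + EX_SLACK < ref_max;
}

int main(void)
{
        printf("%d\n", group_smaller(430, 1024));       /* 1: little vs big     */
        printf("%d\n", group_smaller(900, 1024));       /* 0: within the margin */
        return 0;
}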
+
 static inline enum
 group_type group_classify(struct sched_group *group,
                          struct sg_lb_stats *sgs)
@@ -6320,6 +7327,9 @@ group_type group_classify(struct sched_group *group,
        if (sg_imbalanced(group))
                return group_imbalanced;
 
+       if (sgs->group_misfit_task)
+               return group_misfit_task;
+
        return group_other;
 }
 
@@ -6331,14 +7341,15 @@ group_type group_classify(struct sched_group *group,
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
  * @overload: Indicate more than one runnable task for any CPU.
+ * @overutilized: Indicate overutilization for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
                        int local_group, struct sg_lb_stats *sgs,
-                       bool *overload)
+                       bool *overload, bool *overutilized)
 {
        unsigned long load;
-       int i;
+       int i, nr_running;
 
        memset(sgs, 0, sizeof(*sgs));
 
@@ -6355,7 +7366,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                sgs->group_util += cpu_util(i);
                sgs->sum_nr_running += rq->cfs.h_nr_running;
 
-               if (rq->nr_running > 1)
+               nr_running = rq->nr_running;
+               if (nr_running > 1)
                        *overload = true;
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -6363,8 +7375,17 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                sgs->nr_preferred_running += rq->nr_preferred_running;
 #endif
                sgs->sum_weighted_load += weighted_cpuload(i);
-               if (idle_cpu(i))
+               /*
+                * No need to call idle_cpu() if nr_running is not 0
+                */
+               if (!nr_running && idle_cpu(i))
                        sgs->idle_cpus++;
+
+               if (cpu_overutilized(i)) {
+                       *overutilized = true;
+                       if (!sgs->group_misfit_task && rq->misfit_task)
+                               sgs->group_misfit_task = capacity_of(i);
+               }
        }
 
        /* Adjust by relative CPU capacity of the group */
@@ -6406,9 +7427,25 @@ static bool update_sd_pick_busiest(struct lb_env *env,
        if (sgs->group_type < busiest->group_type)
                return false;
 
+       /*
+        * Candidate sg doesn't face any serious load-balance problems
+        * so don't pick it if the local sg is already filled up.
+        */
+       if (sgs->group_type == group_other &&
+           !group_has_capacity(env, &sds->local_stat))
+               return false;
+
        if (sgs->avg_load <= busiest->avg_load)
                return false;
 
+       /*
+        * Candidate sg has no more than one task per cpu and has higher
+        * per-cpu capacity. No reason to pull tasks to less capable cpus.
+        */
+       if (sgs->sum_nr_running <= sgs->group_weight &&
+           group_smaller_cpu_capacity(sds->local, sg))
+               return false;
+
        /* This is the busiest node in its class. */
        if (!(env->sd->flags & SD_ASYM_PACKING))
                return true;
@@ -6470,7 +7507,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats tmp_sgs;
        int load_idx, prefer_sibling = 0;
-       bool overload = false;
+       bool overload = false, overutilized = false;
 
        if (child && child->flags & SD_PREFER_SIBLING)
                prefer_sibling = 1;
@@ -6492,7 +7529,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                }
 
                update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
-                                               &overload);
+                                               &overload, &overutilized);
 
                if (local_group)
                        goto next_group;
@@ -6514,6 +7551,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                        sgs->group_type = group_classify(sg, sgs);
                }
 
+               /*
+                * Ignore task groups with misfit tasks if local group has no
+                * capacity or if per-cpu capacity isn't higher.
+                */
+               if (sgs->group_type == group_misfit_task &&
+                   (!group_has_capacity(env, &sds->local_stat) ||
+                    !group_smaller_cpu_capacity(sg, sds->local)))
+                       sgs->group_type = group_other;
+
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
                        sds->busiest_stat = *sgs;
@@ -6530,10 +7576,23 @@ next_group:
        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);
 
+       env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
+
        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
                if (env->dst_rq->rd->overload != overload)
                        env->dst_rq->rd->overload = overload;
+
+               /* Update over-utilization (tipping point, U >= 0) indicator */
+               if (env->dst_rq->rd->overutilized != overutilized) {
+                       env->dst_rq->rd->overutilized = overutilized;
+                       trace_sched_overutilized(overutilized);
+               }
+       } else {
+               if (!env->dst_rq->rd->overutilized && overutilized) {
+                       env->dst_rq->rd->overutilized = true;
+                       trace_sched_overutilized(true);
+               }
        }
 
 }
@@ -6682,6 +7741,22 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         */
        if (busiest->avg_load <= sds->avg_load ||
            local->avg_load >= sds->avg_load) {
+               /* Misfitting tasks should be migrated in any case */
+               if (busiest->group_type == group_misfit_task) {
+                       env->imbalance = busiest->group_misfit_task;
+                       return;
+               }
+
+               /*
+                * Busiest group is overloaded, local is not, use the spare
+                * cycles to maximize throughput
+                */
+               if (busiest->group_type == group_overloaded &&
+                   local->group_type <= group_misfit_task) {
+                       env->imbalance = busiest->load_per_task;
+                       return;
+               }
+
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }
@@ -6715,6 +7790,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                (sds->avg_load - local->avg_load) * local->group_capacity
        ) / SCHED_CAPACITY_SCALE;
 
+       /* Boost imbalance to allow misfit task to be balanced. */
+       if (busiest->group_type == group_misfit_task)
+               env->imbalance = max_t(long, env->imbalance,
+                                    busiest->group_misfit_task);
+
        /*
         * if *imbalance is less than the average load per runnable task
         * there is no guarantee that any tasks will be moved so we'll have
@@ -6756,6 +7836,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
         * this level.
         */
        update_sd_lb_stats(env, &sds);
+
+       if (energy_aware() && !env->dst_rq->rd->overutilized)
+               goto out_balanced;
+
        local = &sds.local_stat;
        busiest = &sds.busiest_stat;
 
@@ -6784,6 +7868,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
            busiest->group_no_capacity)
                goto force_balance;
 
+       /* Misfitting tasks should be dealt with regardless of the avg load */
+       if (busiest->group_type == group_misfit_task) {
+               goto force_balance;
+       }
+
        /*
         * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
@@ -6807,7 +7896,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
                 * might end up to just move the imbalance on another group
                 */
                if ((busiest->group_type != group_overloaded) &&
-                               (local->idle_cpus <= (busiest->idle_cpus + 1)))
+                   (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+                   !group_smaller_cpu_capacity(sds.busiest, sds.local))
                        goto out_balanced;
        } else {
                /*
@@ -6820,6 +7910,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
        }
 
 force_balance:
+       env->busiest_group_type = busiest->group_type;
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(env, &sds);
        return sds.busiest;
@@ -6878,7 +7969,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                 */
 
                if (rq->nr_running == 1 && wl > env->imbalance &&
-                   !check_cpu_capacity(rq, env->sd))
+                   !check_cpu_capacity(rq, env->sd) &&
+                   env->busiest_group_type != group_misfit_task)
                        continue;
 
                /*
@@ -6939,6 +8031,13 @@ static int need_active_balance(struct lb_env *env)
                        return 1;
        }
 
+       if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+                               env->src_rq->cfs.h_nr_running == 1 &&
+                               cpu_overutilized(env->src_cpu) &&
+                               !cpu_overutilized(env->dst_cpu)) {
+                       return 1;
+       }
+
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -7060,6 +8159,11 @@ more_balance:
                 * ld_moved     - cumulative load moved across iterations
                 */
                cur_ld_moved = detach_tasks(&env);
+               /*
+                * We want to potentially lower env.src_cpu's OPP.
+                */
+               if (cur_ld_moved)
+                       update_capacity_of(env.src_cpu);
 
                /*
                 * We've detached some tasks from busiest_rq. Every
@@ -7151,7 +8255,8 @@ more_balance:
                 * excessive cache_hot migrations and active balances.
                 */
                if (idle != CPU_NEWLY_IDLE)
-                       sd->nr_balance_failed++;
+                       if (env.src_grp_nr_running > 1)
+                               sd->nr_balance_failed++;
 
                if (need_active_balance(&env)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);
@@ -7283,6 +8388,7 @@ static int idle_balance(struct rq *this_rq)
        struct sched_domain *sd;
        int pulled_task = 0;
        u64 curr_cost = 0;
+       long removed_util = 0;
 
        idle_enter_fair(this_rq);
 
@@ -7292,8 +8398,9 @@ static int idle_balance(struct rq *this_rq)
         */
        this_rq->idle_stamp = rq_clock(this_rq);
 
-       if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-           !this_rq->rd->overload) {
+       if (!energy_aware() &&
+           (this_rq->avg_idle < sysctl_sched_migration_cost ||
+            !this_rq->rd->overload)) {
                rcu_read_lock();
                sd = rcu_dereference_check_sched_domain(this_rq->sd);
                if (sd)
@@ -7305,6 +8412,17 @@ static int idle_balance(struct rq *this_rq)
 
        raw_spin_unlock(&this_rq->lock);
 
+       /*
+        * If removed_util_avg is !0 we most probably migrated some task away
+        * from this_cpu. In this case we might be willing to trigger an OPP
+        * update, but we want to do so if we don't find anybody else to pull
+        * here (we will trigger an OPP update with the pulled task's enqueue
+        * anyway).
+        *
+        * Record removed_util before calling update_blocked_averages, and use
+        * it below (before returning) to see if an OPP update is required.
+        */
+       removed_util = atomic_long_read(&(this_rq->cfs).removed_util_avg);
        update_blocked_averages(this_cpu);
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
@@ -7369,6 +8487,12 @@ out:
        if (pulled_task) {
                idle_exit_fair(this_rq);
                this_rq->idle_stamp = 0;
+       } else if (removed_util) {
+               /*
+                * No task pulled and someone has been migrated away.
+                * Good case to trigger an OPP update.
+                */
+               update_capacity_of(this_cpu);
        }
 
        return pulled_task;
@@ -7428,8 +8552,13 @@ static int active_load_balance_cpu_stop(void *data)
                schedstat_inc(sd, alb_count);
 
                p = detach_one_task(&env);
-               if (p)
+               if (p) {
                        schedstat_inc(sd, alb_pushed);
+                       /*
+                        * We want to potentially lower env.src_cpu's OPP.
+                        */
+                       update_capacity_of(env.src_cpu);
+               }
                else
                        schedstat_inc(sd, alb_failed);
        }
@@ -7809,12 +8938,13 @@ static inline bool nohz_kick_needed(struct rq *rq)
        if (time_before(now, nohz.next_balance))
                return false;
 
-       if (rq->nr_running >= 2)
+       if (rq->nr_running >= 2 &&
+           (!energy_aware() || cpu_overutilized(cpu)))
                return true;
 
        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_busy, cpu));
-       if (sd) {
+       if (sd && !energy_aware()) {
                sgc = sd->groups->sgc;
                nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
@@ -7920,6 +9050,16 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 
        if (static_branch_unlikely(&sched_numa_balancing))
                task_tick_numa(rq, curr);
+
+#ifdef CONFIG_SMP
+       if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
+               rq->rd->overutilized = true;
+               trace_sched_overutilized(true);
+       }
+
+       rq->misfit_task = !task_fits_max(curr, rq->cpu);
+#endif
+
 }
 
 /*
index 69631fa46c2f84fecd3e15599cba0e5935c1148e..55e461055332971c2d5b0e9a036b462050d57144 100644 (file)
@@ -69,3 +69,12 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
+/*
+ * Energy aware scheduling. Use platform energy model to guide scheduling
+ * decisions optimizing for energy efficiency.
+ */
+#ifdef CONFIG_DEFAULT_USE_ENERGY_AWARE
+SCHED_FEAT(ENERGY_AWARE, true)
+#else
+SCHED_FEAT(ENERGY_AWARE, false)
+#endif
index 4a2ef5a02fd3f91d7c4228378c23d5606bb73812..917c94abf5bbaebfb332f2df17f85fcf2bfc1cfa 100644 (file)
  * sched_idle_set_state - Record idle state for the current CPU.
  * @idle_state: State to record.
  */
-void sched_idle_set_state(struct cpuidle_state *idle_state)
+void sched_idle_set_state(struct cpuidle_state *idle_state, int index)
 {
        idle_set_state(this_rq(), idle_state);
+       idle_set_state_idx(this_rq(), index);
 }
 
 static int __read_mostly cpu_idle_force_poll;
@@ -219,6 +220,7 @@ static void cpu_idle_loop(void)
                 */
 
                __current_set_polling();
+               quiet_vmstat();
                tick_nohz_idle_enter();
 
                while (!need_resched()) {
index 8ec86abe0ea188369ee4e7efd21789d8cb127c14..8a16cba968c43c981b0e2671a5e1e2d6cc8e341a 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 
+#include "walt.h"
+
 int sched_rr_timeslice = RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -889,6 +891,51 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
        return rt_task_of(rt_se)->prio;
 }
 
+static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
+{
+       struct rt_prio_array *array = &rt_rq->active;
+       struct sched_rt_entity *rt_se;
+       char buf[500];
+       char *pos = buf;
+       char *end = buf + sizeof(buf);
+       int idx;
+
+       pos += snprintf(pos, sizeof(buf),
+               "sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+               rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+
+       if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
+               goto out;
+
+       pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+       idx = sched_find_first_bit(array->bitmap);
+       while (idx < MAX_RT_PRIO) {
+               list_for_each_entry(rt_se, array->queue + idx, run_list) {
+                       struct task_struct *p;
+
+                       if (!rt_entity_is_task(rt_se))
+                               continue;
+
+                       p = rt_task_of(rt_se);
+                       if (pos < end)
+                               pos += snprintf(pos, end - pos, "\t%s (%d)\n",
+                                       p->comm, p->pid);
+               }
+               idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
+       }
+out:
+#ifdef CONFIG_PANIC_ON_RT_THROTTLING
+       /*
+        * Use pr_err() in the BUG() case since printk_sched() will
+        * not get flushed and deadlock is not a concern.
+        */
+       pr_err("%s", buf);
+       BUG();
+#else
+       printk_deferred("%s", buf);
+#endif
+}
+
 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
        u64 runtime = sched_rt_runtime(rt_rq);
@@ -912,8 +959,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
+                       static bool once = false;
+
                        rt_rq->rt_throttled = 1;
-                       printk_deferred_once("sched: RT throttling activated\n");
+
+                       if (!once) {
+                               once = true;
+                               dump_throttled_rt_tasks(rt_rq);
+                       }
                } else {
                        /*
                         * In case we did anyway, make it go away,
@@ -1261,6 +1314,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
                rt_se->timeout = 0;
 
        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+       walt_inc_cumulative_runnable_avg(rq, p);
 
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
@@ -1272,6 +1326,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);
+       walt_dec_cumulative_runnable_avg(rq, p);
 
        dequeue_pushable_task(rq, p);
 }
@@ -1426,6 +1481,41 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
 #endif
 }
 
+#ifdef CONFIG_SMP
+static void sched_rt_update_capacity_req(struct rq *rq)
+{
+       u64 total, used, age_stamp, avg;
+       s64 delta;
+
+       if (!sched_freq())
+               return;
+
+       sched_avg_update(rq);
+       /*
+        * Since we're reading these variables without serialization make sure
+        * we read them once before doing sanity checks on them.
+        */
+       age_stamp = READ_ONCE(rq->age_stamp);
+       avg = READ_ONCE(rq->rt_avg);
+       delta = rq_clock(rq) - age_stamp;
+
+       if (unlikely(delta < 0))
+               delta = 0;
+
+       total = sched_avg_period() + delta;
+
+       used = div_u64(avg, total);
+       if (unlikely(used > SCHED_CAPACITY_SCALE))
+               used = SCHED_CAPACITY_SCALE;
+
+       set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
+}
+#else
+static inline void sched_rt_update_capacity_req(struct rq *rq)
+{ }
+
+#endif
+
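The capacity requested by sched_rt_update_capacity_req() is essentially the RT busy time over the decayed averaging period, clamped to SCHED_CAPACITY_SCALE. A userspace sketch of the same arithmetic follows; the 500 ms window and 125 ms of RT time are illustrative, and rt_avg is assumed to already carry the arch_scale_freq_capacity() factor, as it does in the kernel's accounting:

#include <stdio.h>
#include <stdint.h>

#define EX_SCHED_CAPACITY_SCALE 1024ULL

int main(void)
{
        /* Invented window: a 500 ms averaging period (in ns) during which
         * RT tasks ran for 125 ms at full frequency capacity. */
        uint64_t period_ns = 500ULL * 1000 * 1000;
        uint64_t rt_avg    = 125ULL * 1000 * 1000 * EX_SCHED_CAPACITY_SCALE;

        uint64_t used = rt_avg / period_ns;
        if (used > EX_SCHED_CAPACITY_SCALE)
                used = EX_SCHED_CAPACITY_SCALE;

        printf("%llu\n", (unsigned long long)used);     /* 256, ~25% of scale */
        return 0;
}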
 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
 {
@@ -1494,8 +1584,17 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
        if (prev->sched_class == &rt_sched_class)
                update_curr_rt(rq);
 
-       if (!rt_rq->rt_queued)
+       if (!rt_rq->rt_queued) {
+               /*
+                * The next task to be picked on this rq will have a lower
+                * priority than rt tasks, so we can spend some time updating
+                * the capacity used by rt tasks based on the last activity.
+                * This value will then be used as an estimate of the next
+                * activity.
+                */
+               sched_rt_update_capacity_req(rq);
                return NULL;
+       }
 
        put_prev_task(rq, prev);
 
@@ -2212,6 +2311,9 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 
        update_curr_rt(rq);
 
+       if (rq->rt.rt_nr_running)
+               sched_rt_update_capacity_req(rq);
+
        watchdog(rq, p);
 
        /*
index 0517abd7dd73b95f152f339b684328165d76f59f..2f2b959ad24485ca8eaf2907b2fc66d58e04bc33 100644 (file)
@@ -410,6 +410,10 @@ struct cfs_rq {
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */
 
+#ifdef CONFIG_SCHED_WALT
+       u64 cumulative_runnable_avg;
+#endif
+
 #ifdef CONFIG_CFS_BANDWIDTH
        int runtime_enabled;
        u64 runtime_expires;
@@ -506,10 +510,18 @@ struct dl_rq {
 #else
        struct dl_bw dl_bw;
 #endif
+       /* This is the "average utilization" for this runqueue */
+       s64 avg_bw;
 };
 
 #ifdef CONFIG_SMP
 
+struct max_cpu_capacity {
+       raw_spinlock_t lock;
+       unsigned long val;
+       int cpu;
+};
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
@@ -528,6 +540,9 @@ struct root_domain {
        /* Indicate more than one runnable task for any CPU */
        bool overload;
 
+       /* Indicate one or more cpus over-utilized (tipping point) */
+       bool overutilized;
+
        /*
         * The bit corresponding to a CPU gets set here if such CPU has more
         * than one runnable -deadline task (as it is below for RT tasks).
@@ -543,6 +558,9 @@ struct root_domain {
         */
        cpumask_var_t rto_mask;
        struct cpupri cpupri;
+
+       /* Maximum cpu capacity in the system. */
+       struct max_cpu_capacity max_cpu_capacity;
 };
 
 extern struct root_domain def_root_domain;
@@ -572,6 +590,7 @@ struct rq {
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned long last_load_update_tick;
+       unsigned int misfit_task;
 #ifdef CONFIG_NO_HZ_COMMON
        u64 nohz_stamp;
        unsigned long nohz_flags;
@@ -579,6 +598,14 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
        unsigned long last_sched_tick;
 #endif
+
+#ifdef CONFIG_CPU_QUIET
+       /* time-based average load */
+       u64 nr_last_stamp;
+       u64 nr_running_integral;
+       seqcount_t ave_seqcnt;
+#endif
+
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
@@ -640,6 +667,30 @@ struct rq {
        u64 max_idle_balance_cost;
 #endif
 
+#ifdef CONFIG_SCHED_WALT
+       /*
+        * max_freq = user or thermal defined maximum
+        * max_possible_freq = maximum supported by hardware
+        */
+       unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
+       struct cpumask freq_domain_cpumask;
+
+       u64 cumulative_runnable_avg;
+       int efficiency; /* Differentiate cpus with different IPC capability */
+       int load_scale_factor;
+       int capacity;
+       int max_possible_capacity;
+       u64 window_start;
+       u64 curr_runnable_sum;
+       u64 prev_runnable_sum;
+       u64 nt_curr_runnable_sum;
+       u64 nt_prev_runnable_sum;
+       u64 cur_irqload;
+       u64 avg_irqload;
+       u64 irqload_ts;
+#endif /* CONFIG_SCHED_WALT */
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
        u64 prev_irq_time;
 #endif
@@ -687,6 +738,7 @@ struct rq {
 #ifdef CONFIG_CPU_IDLE
        /* Must be inspected within a rcu lock section */
        struct cpuidle_state *idle_state;
+       int idle_state_idx;
 #endif
 };
 
@@ -836,6 +888,8 @@ DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_ea);
+DECLARE_PER_CPU(struct sched_domain *, sd_scs);
 
 struct sched_group_capacity {
        atomic_t ref;
@@ -843,7 +897,8 @@ struct sched_group_capacity {
         * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
         * for a single CPU.
         */
-       unsigned int capacity;
+       unsigned long capacity;
+       unsigned long max_capacity; /* Max per-cpu capacity in group */
        unsigned long next_update;
        int imbalance; /* XXX unrelated to capacity but shared group state */
        /*
@@ -860,6 +915,7 @@ struct sched_group {
 
        unsigned int group_weight;
        struct sched_group_capacity *sgc;
+       const struct sched_group_energy const *sge;
 
        /*
         * The CPUs this group covers.
@@ -1163,6 +1219,7 @@ static const u32 prio_to_wmult[40] = {
 #endif
 #define ENQUEUE_REPLENISH      0x08
 #define ENQUEUE_RESTORE        0x10
+#define ENQUEUE_WAKEUP_NEW     0x20
 
 #define DEQUEUE_SLEEP          0x01
 #define DEQUEUE_SAVE           0x02
@@ -1248,6 +1305,7 @@ extern const struct sched_class idle_sched_class;
 
 #ifdef CONFIG_SMP
 
+extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc);
 extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
@@ -1276,6 +1334,17 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
        WARN_ON(!rcu_read_lock_held());
        return rq->idle_state;
 }
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+       rq->idle_state_idx = idle_state_idx;
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+       WARN_ON(!rcu_read_lock_held());
+       return rq->idle_state_idx;
+}
 #else
 static inline void idle_set_state(struct rq *rq,
                                  struct cpuidle_state *idle_state)
@@ -1286,6 +1355,15 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 {
        return NULL;
 }
+
+static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx)
+{
+}
+
+static inline int idle_get_state_idx(struct rq *rq)
+{
+       return -1;
+}
 #endif
 
 extern void sysrq_sched_debug_show(void);
@@ -1310,7 +1388,7 @@ unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
 
-static inline void add_nr_running(struct rq *rq, unsigned count)
+static inline void __add_nr_running(struct rq *rq, unsigned count)
 {
        unsigned prev_nr = rq->nr_running;
 
@@ -1338,11 +1416,48 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
        }
 }
 
-static inline void sub_nr_running(struct rq *rq, unsigned count)
+static inline void __sub_nr_running(struct rq *rq, unsigned count)
 {
        rq->nr_running -= count;
 }
 
+#ifdef CONFIG_CPU_QUIET
+#define NR_AVE_SCALE(x)                ((x) << FSHIFT)
+static inline u64 do_nr_running_integral(struct rq *rq)
+{
+       s64 nr, deltax;
+       u64 nr_running_integral = rq->nr_running_integral;
+
+       deltax = rq->clock_task - rq->nr_last_stamp;
+       nr = NR_AVE_SCALE(rq->nr_running);
+
+       nr_running_integral += nr * deltax;
+
+       return nr_running_integral;
+}
+
+static inline void add_nr_running(struct rq *rq, unsigned count)
+{
+       write_seqcount_begin(&rq->ave_seqcnt);
+       rq->nr_running_integral = do_nr_running_integral(rq);
+       rq->nr_last_stamp = rq->clock_task;
+       __add_nr_running(rq, count);
+       write_seqcount_end(&rq->ave_seqcnt);
+}
+
+static inline void sub_nr_running(struct rq *rq, unsigned count)
+{
+       write_seqcount_begin(&rq->ave_seqcnt);
+       rq->nr_running_integral = do_nr_running_integral(rq);
+       rq->nr_last_stamp = rq->clock_task;
+       __sub_nr_running(rq, count);
+       write_seqcount_end(&rq->ave_seqcnt);
+}
+#else
+#define add_nr_running __add_nr_running
+#define sub_nr_running __sub_nr_running
+#endif
+
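For illustration only (not part of this patch): the CONFIG_CPU_QUIET wrappers above maintain a fixed-point time integral of nr_running (scaled by FSHIFT), from which a consumer can recover the average run-queue depth over an interval. A minimal sketch follows; the helper name is hypothetical, it assumes the usual kernel helpers (div64_u64, FSHIFT) are visible here, and a real reader would sample the fields under read_seqcount_begin()/read_seqcount_retry() on ave_seqcnt.

/* Hypothetical consumer-side sketch, not part of the patch. */
static unsigned int nr_running_avg_over(struct rq *rq, u64 prev_stamp,
                                        u64 prev_integral)
{
        u64 dt = rq->clock_task - prev_stamp;
        u64 di = do_nr_running_integral(rq) - prev_integral;

        if (!dt)
                return rq->nr_running;

        /* the integral accumulates (nr_running << FSHIFT) * dt */
        return div64_u64(di, dt) >> FSHIFT;
}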
 static inline void rq_last_tick_reset(struct rq *rq)
 {
 #ifdef CONFIG_NO_HZ_FULL
@@ -1415,10 +1530,146 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 }
 #endif
 
+#ifdef CONFIG_SMP
+static inline unsigned long capacity_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity;
+}
+
+static inline unsigned long capacity_orig_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int walt_ravg_window;
+extern unsigned int walt_disabled;
+
+/*
+ * cpu_util returns the amount of capacity of a CPU that is used by CFS
+ * tasks. The unit of the return value must be the one of capacity so we can
+ * compare the utilization with the capacity of the CPU that is available for
+ * CFS tasks (i.e. cpu_capacity).
+ *
+ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+ * recent utilization of currently non-runnable tasks on a CPU. It represents
+ * the amount of utilization of a CPU in the range [0..capacity_orig] where
+ * capacity_orig is the cpu_capacity available at the highest frequency
+ * (arch_scale_freq_capacity()).
+ * The utilization of a CPU converges towards a sum equal to or less than the
+ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+ * the running time on this CPU scaled by capacity_curr.
+ *
+ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+ * higher than capacity_orig because of unfortunate rounding in
+ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+ * the average stabilizes with the new running time. We need to check that the
+ * utilization stays within the range of [0..capacity_orig] and cap it if
+ * necessary. Without utilization capping, a group could be seen as overloaded
+ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+ * available capacity. We allow utilization to overshoot capacity_curr (but not
+ * capacity_orig) as it useful for predicting the capacity required after task
+ * migrations (scheduler-driven DVFS).
+ */
+static inline unsigned long __cpu_util(int cpu, int delta)
+{
+       unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+       unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+       if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+               util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+               do_div(util, walt_ravg_window);
+       }
+#endif
+       delta += util;
+       if (delta < 0)
+               return 0;
+
+       return (delta >= capacity) ? capacity : delta;
+}
+
+static inline unsigned long cpu_util(int cpu)
+{
+       return __cpu_util(cpu, 0);
+}
+
+#endif
+
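For illustration, the WALT branch of __cpu_util() above maps a window's scaled busy time onto the same [0..SCHED_LOAD_SCALE] range that PELT uses. A rough worked example, assuming the default 20 ms window and SCHED_LOAD_SHIFT of 10:

/*
 * prev_runnable_sum = 10 ms of frequency/IPC-scaled busy time (10,000,000 ns)
 * util = (10,000,000 << 10) / walt_ravg_window
 *      = 10,240,000,000 / 20,000,000 = 512, i.e. ~50% of SCHED_LOAD_SCALE,
 * which is then clamped to capacity_orig_of(cpu) exactly like the PELT value.
 */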
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+#define capacity_max SCHED_CAPACITY_SCALE
+extern unsigned int capacity_margin;
+extern struct static_key __sched_freq;
+
+static inline bool sched_freq(void)
+{
+       return static_key_false(&__sched_freq);
+}
+
+DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
+void update_cpu_capacity_request(int cpu, bool request);
+
+static inline void set_cfs_cpu_capacity(int cpu, bool request,
+                                       unsigned long capacity)
+{
+       struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+
+#ifdef CONFIG_SCHED_WALT
+       if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+               int rtdl = scr->rt + scr->dl;
+               /*
+                * WALT tracks the utilization of a CPU considering the load
+                * generated by all the scheduling classes.
+                * Since the following call to:
+                *    update_cpu_capacity
+                * is already adding the RT and DL utilizations let's remove
+                * these contributions from the WALT signal.
+                */
+               if (capacity > rtdl)
+                       capacity -= rtdl;
+               else
+                       capacity = 0;
+       }
+#endif
+       if (scr->cfs != capacity) {
+               scr->cfs = capacity;
+               update_cpu_capacity_request(cpu, request);
+       }
+}
+
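To make the RT/DL subtraction in set_cfs_cpu_capacity() concrete, here is a small worked example with purely illustrative numbers:

/*
 * WALT-derived 'capacity' passed in          = 600
 * scr->rt = 100, scr->dl = 50  ->  rtdl      = 150
 * cfs request stored                         = 600 - 150 = 450
 * total request (cfs + rt + dl)              = 600
 * so the RT and DL contributions already tracked in scr are not counted twice.
 */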
+static inline void set_rt_cpu_capacity(int cpu, bool request,
+                                      unsigned long capacity)
+{
+       if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) {
+               per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity;
+               update_cpu_capacity_request(cpu, request);
+       }
+}
+
+static inline void set_dl_cpu_capacity(int cpu, bool request,
+                                      unsigned long capacity)
+{
+       if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) {
+               per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity;
+               update_cpu_capacity_request(cpu, request);
+       }
+}
+#else
+static inline bool sched_freq(void) { return false; }
+static inline void set_cfs_cpu_capacity(int cpu, bool request,
+                                       unsigned long capacity)
+{ }
+static inline void set_rt_cpu_capacity(int cpu, bool request,
+                                      unsigned long capacity)
+{ }
+static inline void set_dl_cpu_capacity(int cpu, bool request,
+                                      unsigned long capacity)
+{ }
+#endif
+
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
        rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
-       sched_avg_update(rq);
 }
 #else
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
@@ -1507,6 +1758,9 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
 }
 
+extern struct rq *lock_rq_of(struct task_struct *p, unsigned long *flags);
+extern void unlock_rq_of(struct rq *rq, struct task_struct *p, unsigned long *flags);
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
@@ -1579,7 +1833,8 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       raw_spin_unlock(&busiest->lock);
+       if (this_rq != busiest)
+               raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 
index cbc67da109544c4f0841b609e44d7337650aa81c..61f852d46858c868fda8b00601311b7837f66552 100644 (file)
@@ -1,4 +1,5 @@
 #include "sched.h"
+#include "walt.h"
 
 /*
  * stop-task scheduling class.
@@ -42,12 +43,14 @@ static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
        add_nr_running(rq, 1);
+       walt_inc_cumulative_runnable_avg(rq, p);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
        sub_nr_running(rq, 1);
+       walt_dec_cumulative_runnable_avg(rq, p);
 }
 
 static void yield_task_stop(struct rq *rq)
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
new file mode 100644 (file)
index 0000000..079b188
--- /dev/null
@@ -0,0 +1,945 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+
+#include <trace/events/sched.h>
+
+#include "sched.h"
+#include "tune.h"
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+static bool schedtune_initialized = false;
+#endif
+
+unsigned int sysctl_sched_cfs_boost __read_mostly;
+
+extern struct target_nrg schedtune_target_nrg;
+
+/* Performance Boost region (B) threshold params */
+static int perf_boost_idx;
+
+/* Performance Constraint region (C) threshold params */
+static int perf_constrain_idx;
+
+/*
+ * Performance-Energy (P-E) space threshold constants
+ */
+struct threshold_params {
+       int nrg_gain;
+       int cap_gain;
+};
+
+/*
+ * System-specific P-E space threshold constants
+ */
+static struct threshold_params
+threshold_gains[] = {
+       { 0, 5 }, /*   < 10% */
+       { 1, 5 }, /*   < 20% */
+       { 2, 5 }, /*   < 30% */
+       { 3, 5 }, /*   < 40% */
+       { 4, 5 }, /*   < 50% */
+       { 5, 4 }, /*   < 60% */
+       { 5, 3 }, /*   < 70% */
+       { 5, 2 }, /*   < 80% */
+       { 5, 1 }, /*   < 90% */
+       { 5, 0 }  /* <= 100% */
+};
+
+static int
+__schedtune_accept_deltas(int nrg_delta, int cap_delta,
+                         int perf_boost_idx, int perf_constrain_idx)
+{
+       int payoff = -INT_MAX;
+       int gain_idx = -1;
+
+       /* Performance Boost (B) region */
+       if (nrg_delta >= 0 && cap_delta > 0)
+               gain_idx = perf_boost_idx;
+       /* Performance Constraint (C) region */
+       else if (nrg_delta < 0 && cap_delta <= 0)
+               gain_idx = perf_constrain_idx;
+
+       /* Default: reject schedule candidate */
+       if (gain_idx == -1)
+               return payoff;
+
+       /*
+        * Evaluate "Performance Boost" vs "Energy Increase"
+        *
+        * - Performance Boost (B) region
+        *
+        *   Condition: nrg_delta > 0 && cap_delta > 0
+        *   Payoff criteria:
+        *     cap_gain / nrg_gain  < cap_delta / nrg_delta =
+        *     cap_gain * nrg_delta < cap_delta * nrg_gain
+        *   Note that since both nrg_gain and nrg_delta are positive, the
+        *   inequality does not change. Thus:
+        *
+        *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+        *
+        * - Performance Constraint (C) region
+        *
+        *   Condition: nrg_delta < 0 && cap_delta < 0
+        *   payoff criteria:
+        *     cap_gain / nrg_gain  > cap_delta / nrg_delta =
+        *     cap_gain * nrg_delta < cap_delta * nrg_gain
+        *   Note that since nrg_gain > 0 while nrg_delta < 0, the
+        *   inequality changes direction. Thus:
+        *
+        *     payoff = (cap_delta * nrg_gain) - (cap_gain * nrg_delta)
+        *
+        * This means that, in case of same positive defined {cap,nrg}_gain
+        * for both the B and C regions, we can use the same payoff formula
+        * where a positive value represents the accept condition.
+        */
+       payoff  = cap_delta * threshold_gains[gain_idx].nrg_gain;
+       payoff -= nrg_delta * threshold_gains[gain_idx].cap_gain;
+
+       return payoff;
+}
+
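A short worked example of the payoff computed above, with illustrative numbers:

/*
 * Boost region with a ~50% boost -> threshold_gains[5] = { .nrg_gain = 5,
 * .cap_gain = 4 }.  For a candidate with cap_delta = 100 and nrg_delta = 60:
 *     payoff = 100 * 5 - 60 * 4  =  260   (> 0, candidate accepted)
 * For the same capacity gain at nrg_delta = 150:
 *     payoff = 100 * 5 - 150 * 4 = -100   (< 0, candidate rejected)
 */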
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+       /* SchedTune CGroup subsystem */
+       struct cgroup_subsys_state css;
+
+       /* Boost group allocated ID */
+       int idx;
+
+       /* Boost value for tasks on that SchedTune CGroup */
+       int boost;
+
+       /* Performance Boost (B) region threshold params */
+       int perf_boost_idx;
+
+       /* Performance Constraint (C) region threshold params */
+       int perf_constrain_idx;
+
+       /* Hint to bias scheduling of tasks on that SchedTune CGroup
+        * towards idle CPUs */
+       int prefer_idle;
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+       return css ? container_of(css, struct schedtune, css) : NULL;
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+       return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+       return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define a system-wide boost tuning,
+ * which is applied to all tasks in the system.
+ * Task specific boost tuning could be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is applied
+ * to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+       .boost  = 0,
+       .perf_boost_idx = 0,
+       .perf_constrain_idx = 0,
+       .prefer_idle = 0,
+};
+
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+                       struct task_struct *task)
+{
+       struct schedtune *ct;
+       int perf_boost_idx;
+       int perf_constrain_idx;
+
+       /* Optimal (O) region */
+       if (nrg_delta < 0 && cap_delta > 0) {
+               trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+               return INT_MAX;
+       }
+
+       /* Suboptimal (S) region */
+       if (nrg_delta > 0 && cap_delta < 0) {
+               trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+               return -INT_MAX;
+       }
+
+       /* Get task specific perf Boost/Constraints indexes */
+       rcu_read_lock();
+       ct = task_schedtune(task);
+       perf_boost_idx = ct->perf_boost_idx;
+       perf_constrain_idx = ct->perf_constrain_idx;
+       rcu_read_unlock();
+
+       return __schedtune_accept_deltas(nrg_delta, cap_delta,
+                       perf_boost_idx, perf_constrain_idx);
+}
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only a limited number of
+ * boost groups for two main reasons:
+ * 1. on a real system we usually have only a few classes of workloads which
+ *    it makes sense to boost with different values (e.g. background vs foreground
+ *    tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time efficient
+ *    implementation especially for the computation of the per-CPU boost
+ *    value
+ */
+#define BOOSTGROUPS_COUNT 4
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+       &root_schedtune,
+       NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact a CPU, for example when a
+ * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
+ * likely with different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+       bool idle;
+       /* Maximum boost value for all RUNNABLE tasks on a CPU */
+       int boost_max;
+       struct {
+               /* The boost for tasks on that boost group */
+               int boost;
+               /* Count of RUNNABLE tasks on that boost group */
+               unsigned tasks;
+       } group[BOOSTGROUPS_COUNT];
+       /* CPU's boost group locking */
+       raw_spinlock_t lock;
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+static void
+schedtune_cpu_update(int cpu)
+{
+       struct boost_groups *bg;
+       int boost_max;
+       int idx;
+
+       bg = &per_cpu(cpu_boost_groups, cpu);
+
+       /* The root boost group is always active */
+       boost_max = bg->group[0].boost;
+       for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) {
+               /*
+                * A boost group affects a CPU only if it has
+                * RUNNABLE tasks on that CPU
+                */
+               if (bg->group[idx].tasks == 0)
+                       continue;
+
+               boost_max = max(boost_max, bg->group[idx].boost);
+       }
+       /* Ensure boost_max is non-negative when all cgroup boost values
+        * are negative. This avoids under-accounting of CPU capacity, which
+        * may cause task stacking and frequency spikes. */
+       boost_max = max(boost_max, 0);
+       bg->boost_max = boost_max;
+}
+
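For example, the per-CPU maximum computed by schedtune_cpu_update() behaves as follows (illustrative values):

/*
 * group[0] (root) boost = -10, always counted
 * group[1]        boost =  30, 2 RUNNABLE tasks on this CPU -> counted
 * group[2]        boost = -20, 1 RUNNABLE task              -> counted
 *   => boost_max = max(-10, 30, -20) = 30
 * Once the group[1] tasks dequeue, max(-10, -20) = -10 would be negative,
 * so the final clamp keeps boost_max at 0.
 */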
+static int
+schedtune_boostgroup_update(int idx, int boost)
+{
+       struct boost_groups *bg;
+       int cur_boost_max;
+       int old_boost;
+       int cpu;
+
+       /* Update per CPU boost groups */
+       for_each_possible_cpu(cpu) {
+               bg = &per_cpu(cpu_boost_groups, cpu);
+
+               /*
+                * Keep track of current boost values to compute the per CPU
+                * maximum only when it has been affected by the new value of
+                * the updated boost group
+                */
+               cur_boost_max = bg->boost_max;
+               old_boost = bg->group[idx].boost;
+
+               /* Update the boost value of this boost group */
+               bg->group[idx].boost = boost;
+
+               /* Check if this update increases the current max */
+               if (boost > cur_boost_max && bg->group[idx].tasks) {
+                       bg->boost_max = boost;
+                       trace_sched_tune_boostgroup_update(cpu, 1, bg->boost_max);
+                       continue;
+               }
+
+               /* Check if this update has decreased current max */
+               if (cur_boost_max == old_boost && old_boost > boost) {
+                       schedtune_cpu_update(cpu);
+                       trace_sched_tune_boostgroup_update(cpu, -1, bg->boost_max);
+                       continue;
+               }
+
+               trace_sched_tune_boostgroup_update(cpu, 0, bg->boost_max);
+       }
+
+       return 0;
+}
+
+#define ENQUEUE_TASK  1
+#define DEQUEUE_TASK -1
+
+static inline void
+schedtune_tasks_update(struct task_struct *p, int cpu, int idx, int task_count)
+{
+       struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+       int tasks = bg->group[idx].tasks + task_count;
+
+       /* Update the boosted tasks count, never letting it go negative */
+       bg->group[idx].tasks = max(0, tasks);
+
+       trace_sched_tune_tasks_update(p, cpu, tasks, idx,
+                       bg->group[idx].boost, bg->boost_max);
+
+       /* Boost group activation or deactivation on that RQ */
+       if (tasks == 1 || tasks == 0)
+               schedtune_cpu_update(cpu);
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_enqueue_task(struct task_struct *p, int cpu)
+{
+       struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+       unsigned long irq_flags;
+       struct schedtune *st;
+       int idx;
+
+       if (!unlikely(schedtune_initialized))
+               return;
+
+       /*
+        * When a task is marked PF_EXITING by do_exit() it's going to be
+        * dequeued and enqueued multiple times in the exit path.
+        * Thus we avoid any further update, since we do not want to change
+        * CPU boosting while the task is exiting.
+        */
+       if (p->flags & PF_EXITING)
+               return;
+
+       /*
+        * Boost group accounting is protected by a per-cpu lock and requires
+        * interrupts to be disabled to avoid race conditions, for example on
+        * do_exit()::cgroup_exit() and task migration.
+        */
+       raw_spin_lock_irqsave(&bg->lock, irq_flags);
+       rcu_read_lock();
+
+       st = task_schedtune(p);
+       idx = st->idx;
+
+       schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK);
+
+       rcu_read_unlock();
+       raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+int schedtune_can_attach(struct cgroup_taskset *tset)
+{
+       struct task_struct *task;
+       struct cgroup_subsys_state *css;
+       struct boost_groups *bg;
+       unsigned long irq_flags;
+       unsigned int cpu;
+       struct rq *rq;
+       int src_bg; /* Source boost group index */
+       int dst_bg; /* Destination boost group index */
+       int tasks;
+
+       if (!unlikely(schedtune_initialized))
+               return 0;
+
+
+       cgroup_taskset_for_each(task, css, tset) {
+
+               /*
+                * Lock the CPU's RQ the task is enqueued to avoid race
+                * conditions with migration code while the task is being
+                * accounted
+                */
+               rq = lock_rq_of(task, &irq_flags);
+
+               if (!task->on_rq) {
+                       unlock_rq_of(rq, task, &irq_flags);
+                       continue;
+               }
+
+               /*
+                * Boost group accounting is protected by a per-cpu lock and requires
+                * interrupts to be disabled to avoid race conditions on...
+                */
+               cpu = cpu_of(rq);
+               bg = &per_cpu(cpu_boost_groups, cpu);
+               raw_spin_lock(&bg->lock);
+
+               dst_bg = css_st(css)->idx;
+               src_bg = task_schedtune(task)->idx;
+
+               /*
+                * Current task is not changing boostgroup, which can
+                * happen when the new hierarchy is in use.
+                */
+               if (unlikely(dst_bg == src_bg)) {
+                       raw_spin_unlock(&bg->lock);
+                       unlock_rq_of(rq, task, &irq_flags);
+                       continue;
+               }
+
+               /*
+                * This is the case of a RUNNABLE task which is switching its
+                * current boost group.
+                */
+
+               /* Move task from src to dst boost group */
+               tasks = bg->group[src_bg].tasks - 1;
+               bg->group[src_bg].tasks = max(0, tasks);
+               bg->group[dst_bg].tasks += 1;
+
+               raw_spin_unlock(&bg->lock);
+               unlock_rq_of(rq, task, &irq_flags);
+
+               /* Update CPU boost group */
+               if (bg->group[src_bg].tasks == 0 || bg->group[dst_bg].tasks == 1)
+                       schedtune_cpu_update(task_cpu(task));
+
+       }
+
+       return 0;
+}
+
+void schedtune_cancel_attach(struct cgroup_taskset *tset)
+{
+       /* This can happen only if the SchedTune controller is mounted with
+        * other hierarchies and one of them fails. Since SchedTune is usually
+        * mounted on its own hierarchy, for the time being we do not implement
+        * a proper rollback mechanism. */
+       WARN(1, "SchedTune cancel attach not implemented");
+}
+
+/*
+ * NOTE: This function must be called while holding the lock on the CPU RQ
+ */
+void schedtune_dequeue_task(struct task_struct *p, int cpu)
+{
+       struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
+       unsigned long irq_flags;
+       struct schedtune *st;
+       int idx;
+
+       if (!unlikely(schedtune_initialized))
+               return;
+
+       /*
+        * When a task is marked PF_EXITING by do_exit() it's going to be
+        * dequeued and enqueued multiple times in the exit path.
+        * Thus we avoid any further update, since we do not want to change
+        * CPU boosting while the task is exiting.
+        * The last dequeue is already enforced by the do_exit() code path
+        * via schedtune_exit_task().
+        */
+       if (p->flags & PF_EXITING)
+               return;
+
+       /*
+        * Boost group accounting is protected by a per-cpu lock and requires
+        * interrupts to be disabled to avoid race conditions on...
+        */
+       raw_spin_lock_irqsave(&bg->lock, irq_flags);
+       rcu_read_lock();
+
+       st = task_schedtune(p);
+       idx = st->idx;
+
+       schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK);
+
+       rcu_read_unlock();
+       raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
+}
+
+void schedtune_exit_task(struct task_struct *tsk)
+{
+       struct schedtune *st;
+       unsigned long irq_flags;
+       unsigned int cpu;
+       struct rq *rq;
+       int idx;
+
+       if (!unlikely(schedtune_initialized))
+               return;
+
+       rq = lock_rq_of(tsk, &irq_flags);
+       rcu_read_lock();
+
+       cpu = cpu_of(rq);
+       st = task_schedtune(tsk);
+       idx = st->idx;
+       schedtune_tasks_update(tsk, cpu, idx, DEQUEUE_TASK);
+
+       rcu_read_unlock();
+       unlock_rq_of(rq, tsk, &irq_flags);
+}
+
+int schedtune_cpu_boost(int cpu)
+{
+       struct boost_groups *bg;
+
+       bg = &per_cpu(cpu_boost_groups, cpu);
+       return bg->boost_max;
+}
+
+int schedtune_task_boost(struct task_struct *p)
+{
+       struct schedtune *st;
+       int task_boost;
+
+       /* Get task boost value */
+       rcu_read_lock();
+       st = task_schedtune(p);
+       task_boost = st->boost;
+       rcu_read_unlock();
+
+       return task_boost;
+}
+
+int schedtune_prefer_idle(struct task_struct *p)
+{
+       struct schedtune *st;
+       int prefer_idle;
+
+       /* Get prefer_idle value */
+       rcu_read_lock();
+       st = task_schedtune(p);
+       prefer_idle = st->prefer_idle;
+       rcu_read_unlock();
+
+       return prefer_idle;
+}
+
+static u64
+prefer_idle_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+       struct schedtune *st = css_st(css);
+
+       return st->prefer_idle;
+}
+
+static int
+prefer_idle_write(struct cgroup_subsys_state *css, struct cftype *cft,
+           u64 prefer_idle)
+{
+       struct schedtune *st = css_st(css);
+       st->prefer_idle = prefer_idle;
+
+       return 0;
+}
+
+static s64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+       struct schedtune *st = css_st(css);
+
+       return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+           s64 boost)
+{
+       struct schedtune *st = css_st(css);
+       unsigned threshold_idx;
+       int boost_pct;
+
+       if (boost < -100 || boost > 100)
+               return -EINVAL;
+       boost_pct = boost;
+
+       /*
+        * Update threshold params for Performance Boost (B)
+        * and Performance Constraint (C) regions.
+        * The current implementation uses the same cuts for both
+        * B and C regions.
+        */
+       threshold_idx = clamp(boost_pct, 0, 99) / 10;
+       st->perf_boost_idx = threshold_idx;
+       st->perf_constrain_idx = threshold_idx;
+
+       st->boost = boost;
+       if (css == &root_schedtune.css) {
+               sysctl_sched_cfs_boost = boost;
+               perf_boost_idx  = threshold_idx;
+               perf_constrain_idx  = threshold_idx;
+       }
+
+       /* Update CPU boost */
+       schedtune_boostgroup_update(st->idx, st->boost);
+
+       trace_sched_tune_config(st->boost);
+
+       return 0;
+}
+
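The boost-to-threshold mapping used in boost_write() works out as follows (illustrative values):

/*
 * boost =  25  ->  clamp(25, 0, 99) / 10  = 2  ->  threshold_gains[2] = {2, 5}
 * boost = 100  ->  clamp(100, 0, 99) / 10 = 9  ->  threshold_gains[9] = {5, 0}
 * boost = -40  ->  clamp(-40, 0, 99) / 10 = 0  ->  threshold_gains[0] = {0, 5}
 */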
+static struct cftype files[] = {
+       {
+               .name = "boost",
+               .read_s64 = boost_read,
+               .write_s64 = boost_write,
+       },
+       {
+               .name = "prefer_idle",
+               .read_u64 = prefer_idle_read,
+               .write_u64 = prefer_idle_write,
+       },
+       { }     /* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+       struct boost_groups *bg;
+       int cpu;
+
+       /* Keep track of allocated boost groups */
+       allocated_group[st->idx] = st;
+
+       /* Initialize the per CPU boost groups */
+       for_each_possible_cpu(cpu) {
+               bg = &per_cpu(cpu_boost_groups, cpu);
+               bg->group[st->idx].boost = 0;
+               bg->group[st->idx].tasks = 0;
+       }
+
+       return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+       struct schedtune *st;
+       int idx;
+
+       if (!parent_css)
+               return &root_schedtune.css;
+
+       /* Allow only single-level hierarchies */
+       if (parent_css != &root_schedtune.css) {
+               pr_err("Nested SchedTune boosting groups not allowed\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Allow only a limited number of boosting groups */
+       for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+               if (!allocated_group[idx])
+                       break;
+       if (idx == BOOSTGROUPS_COUNT) {
+               pr_err("Trying to create more than %d SchedTune boosting groups\n",
+                      BOOSTGROUPS_COUNT);
+               return ERR_PTR(-ENOSPC);
+       }
+
+       st = kzalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               goto out;
+
+       /* Initialize per CPUs boost group support */
+       st->idx = idx;
+       if (schedtune_boostgroup_init(st))
+               goto release;
+
+       return &st->css;
+
+release:
+       kfree(st);
+out:
+       return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+       /* Reset this boost group */
+       schedtune_boostgroup_update(st->idx, 0);
+
+       /* Keep track of allocated boost groups */
+       allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+       struct schedtune *st = css_st(css);
+
+       schedtune_boostgroup_release(st);
+       kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+       .css_alloc      = schedtune_css_alloc,
+       .css_free       = schedtune_css_free,
+       .can_attach     = schedtune_can_attach,
+       .cancel_attach  = schedtune_cancel_attach,
+       .legacy_cftypes = files,
+       .early_init     = 1,
+};
+
+static inline void
+schedtune_init_cgroups(void)
+{
+       struct boost_groups *bg;
+       int cpu;
+
+       /* Initialize the per CPU boost groups */
+       for_each_possible_cpu(cpu) {
+               bg = &per_cpu(cpu_boost_groups, cpu);
+               memset(bg, 0, sizeof(struct boost_groups));
+               raw_spin_lock_init(&bg->lock);
+       }
+
+       pr_info("schedtune: configured to support %d boost groups\n",
+               BOOSTGROUPS_COUNT);
+
+       schedtune_initialized = true;
+}
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+schedtune_accept_deltas(int nrg_delta, int cap_delta,
+                       struct task_struct *task)
+{
+       /* Optimal (O) region */
+       if (nrg_delta < 0 && cap_delta > 0) {
+               trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, 1, 0);
+               return INT_MAX;
+       }
+
+       /* Suboptimal (S) region */
+       if (nrg_delta > 0 && cap_delta < 0) {
+               trace_sched_tune_filter(nrg_delta, cap_delta, 0, 0, -1, 5);
+               return -INT_MAX;
+       }
+
+       return __schedtune_accept_deltas(nrg_delta, cap_delta,
+                       perf_boost_idx, perf_constrain_idx);
+}
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int
+sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
+                              void __user *buffer, size_t *lenp,
+                              loff_t *ppos)
+{
+       int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       unsigned threshold_idx;
+       int boost_pct;
+
+       if (ret || !write)
+               return ret;
+
+       if (sysctl_sched_cfs_boost < -100 || sysctl_sched_cfs_boost > 100)
+               return -EINVAL;
+       boost_pct = sysctl_sched_cfs_boost;
+
+       /*
+        * Update threshold params for Performance Boost (B)
+        * and Performance Constraint (C) regions.
+        * The current implementation uses the same cuts for both
+        * B and C regions.
+        */
+       threshold_idx = clamp(boost_pct, 0, 99) / 10;
+       perf_boost_idx = threshold_idx;
+       perf_constrain_idx = threshold_idx;
+
+       return 0;
+}
+
+#ifdef CONFIG_SCHED_DEBUG
+static void
+schedtune_test_nrg(unsigned long delta_pwr)
+{
+       unsigned long test_delta_pwr;
+       unsigned long test_norm_pwr;
+       int idx;
+
+       /*
+        * Check normalization constants using some constant system
+        * energy values
+        */
+       pr_info("schedtune: verify normalization constants...\n");
+       for (idx = 0; idx < 6; ++idx) {
+               test_delta_pwr = delta_pwr >> idx;
+
+               /* Normalize on max energy for target platform */
+               test_norm_pwr = reciprocal_divide(
+                                       test_delta_pwr << SCHED_LOAD_SHIFT,
+                                       schedtune_target_nrg.rdiv);
+
+               pr_info("schedtune: max_pwr/2^%d: %4lu => norm_pwr: %5lu\n",
+                       idx, test_delta_pwr, test_norm_pwr);
+       }
+}
+#else
+#define schedtune_test_nrg(delta_pwr)
+#endif
+
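As a rough worked example of the normalization that schedtune_test_nrg() verifies (illustrative numbers):

/*
 * min_power = 400, max_power = 2000  ->  delta_pwr = 1600, rdiv ~ 1/1600
 * A raw energy delta of 800 then normalizes to
 *     (800 << SCHED_LOAD_SHIFT) / 1600 = (800 * 1024) / 1600 = 512
 * i.e. about half of the system's min..max energy span expressed on the
 * [0..SCHED_LOAD_SCALE] scale.
 */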
+/*
+ * Compute the min/max power consumption of a cluster and all its CPUs
+ */
+static void
+schedtune_add_cluster_nrg(
+               struct sched_domain *sd,
+               struct sched_group *sg,
+               struct target_nrg *ste)
+{
+       struct sched_domain *sd2;
+       struct sched_group *sg2;
+
+       struct cpumask *cluster_cpus;
+       char str[32];
+
+       unsigned long min_pwr;
+       unsigned long max_pwr;
+       int cpu;
+
+       /* Get Cluster energy using EM data for the first CPU */
+       cluster_cpus = sched_group_cpus(sg);
+       snprintf(str, 32, "CLUSTER[%*pbl]",
+                cpumask_pr_args(cluster_cpus));
+
+       min_pwr = sg->sge->idle_states[sg->sge->nr_idle_states - 1].power;
+       max_pwr = sg->sge->cap_states[sg->sge->nr_cap_states - 1].power;
+       pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+               str, min_pwr, max_pwr);
+
+       /*
+        * Keep track of this cluster's energy in the computation of the
+        * overall system energy
+        */
+       ste->min_power += min_pwr;
+       ste->max_power += max_pwr;
+
+       /* Get CPU energy using EM data for each CPU in the group */
+       for_each_cpu(cpu, cluster_cpus) {
+               /* Get a SD view for the specific CPU */
+               for_each_domain(cpu, sd2) {
+                       /* Get the CPU group */
+                       sg2 = sd2->groups;
+                       min_pwr = sg2->sge->idle_states[sg2->sge->nr_idle_states - 1].power;
+                       max_pwr = sg2->sge->cap_states[sg2->sge->nr_cap_states - 1].power;
+
+                       ste->min_power += min_pwr;
+                       ste->max_power += max_pwr;
+
+                       snprintf(str, 32, "CPU[%d]", cpu);
+                       pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+                               str, min_pwr, max_pwr);
+
+                       /*
+                        * Assume we have EM data only at the CPU and
+                        * the upper CLUSTER level
+                        */
+                       BUG_ON(!cpumask_equal(
+                               sched_group_cpus(sg),
+                               sched_group_cpus(sd2->parent->groups)
+                               ));
+                       break;
+               }
+       }
+}
+
+/*
+ * Initialize the constants required to compute normalized energy.
+ * The values of these constants depend on the EM data for the specific
+ * target system and topology.
+ * Thus, this function is expected to be called by the code
+ * that binds the EM to the topology information.
+ */
+static int
+schedtune_init(void)
+{
+       struct target_nrg *ste = &schedtune_target_nrg;
+       unsigned long delta_pwr = 0;
+       struct sched_domain *sd;
+       struct sched_group *sg;
+
+       pr_info("schedtune: init normalization constants...\n");
+       ste->max_power = 0;
+       ste->min_power = 0;
+
+       rcu_read_lock();
+
+       /*
+        * When EAS is in use, we always have a pointer to the highest SD
+        * which provides EM data.
+        */
+       sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask)));
+       if (!sd) {
+               pr_info("schedtune: no energy model data\n");
+               goto nodata;
+       }
+
+       sg = sd->groups;
+       do {
+               schedtune_add_cluster_nrg(sd, sg, ste);
+       } while (sg = sg->next, sg != sd->groups);
+
+       rcu_read_unlock();
+
+       pr_info("schedtune: %-17s min_pwr: %5lu max_pwr: %5lu\n",
+               "SYSTEM", ste->min_power, ste->max_power);
+
+       /* Compute normalization constants */
+       delta_pwr = ste->max_power - ste->min_power;
+       ste->rdiv = reciprocal_value(delta_pwr);
+       pr_info("schedtune: using normalization constants mul: %u sh1: %u sh2: %u\n",
+               ste->rdiv.m, ste->rdiv.sh1, ste->rdiv.sh2);
+
+       schedtune_test_nrg(delta_pwr);
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+       schedtune_init_cgroups();
+#else
+       pr_info("schedtune: configured to support global boosting only\n");
+#endif
+
+       return 0;
+
+nodata:
+       rcu_read_unlock();
+       return -EINVAL;
+}
+postcore_initcall(schedtune_init);
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
new file mode 100644 (file)
index 0000000..4f64417
--- /dev/null
@@ -0,0 +1,55 @@
+
+#ifdef CONFIG_SCHED_TUNE
+
+#include <linux/reciprocal_div.h>
+
+/*
+ * System energy normalization constants
+ */
+struct target_nrg {
+       unsigned long min_power;
+       unsigned long max_power;
+       struct reciprocal_value rdiv;
+};
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
+
+int schedtune_prefer_idle(struct task_struct *tsk);
+
+void schedtune_exit_task(struct task_struct *tsk);
+
+void schedtune_enqueue_task(struct task_struct *p, int cpu);
+void schedtune_dequeue_task(struct task_struct *p, int cpu);
+
+#else /* CONFIG_CGROUP_SCHEDTUNE */
+
+#define schedtune_cpu_boost(cpu)  get_sysctl_sched_cfs_boost()
+#define schedtune_task_boost(tsk) get_sysctl_sched_cfs_boost()
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
+int schedtune_normalize_energy(int energy);
+int schedtune_accept_deltas(int nrg_delta, int cap_delta,
+                           struct task_struct *task);
+
+#else /* CONFIG_SCHED_TUNE */
+
+#define schedtune_cpu_boost(cpu)  0
+#define schedtune_task_boost(tsk) 0
+
+#define schedtune_exit_task(task) do { } while (0)
+
+#define schedtune_enqueue_task(task, cpu) do { } while (0)
+#define schedtune_dequeue_task(task, cpu) do { } while (0)
+
+#define schedtune_accept_deltas(nrg_delta, cap_delta, task) nrg_delta
+
+#endif /* CONFIG_SCHED_TUNE */
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
new file mode 100644 (file)
index 0000000..2ffb168
--- /dev/null
@@ -0,0 +1,1168 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Window Assisted Load Tracking (WALT) implementation credits:
+ * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
+ * Pavan Kumar Kondeti, Olav Haugan
+ *
+ * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
+ *             and Todd Kjos
+ */
+
+#include <linux/syscore_ops.h>
+#include <linux/cpufreq.h>
+#include <trace/events/sched.h>
+#include "sched.h"
+#include "walt.h"
+
+#define WINDOW_STATS_RECENT            0
+#define WINDOW_STATS_MAX               1
+#define WINDOW_STATS_MAX_RECENT_AVG    2
+#define WINDOW_STATS_AVG               3
+#define WINDOW_STATS_INVALID_POLICY    4
+
+#define EXITING_TASK_MARKER    0xdeaddead
+
+static __read_mostly unsigned int walt_ravg_hist_size = 5;
+static __read_mostly unsigned int walt_window_stats_policy =
+       WINDOW_STATS_MAX_RECENT_AVG;
+static __read_mostly unsigned int walt_account_wait_time = 1;
+static __read_mostly unsigned int walt_freq_account_wait_time = 0;
+static __read_mostly unsigned int walt_io_is_busy = 0;
+
+unsigned int sysctl_sched_walt_init_task_load_pct = 15;
+
+/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
+unsigned int __read_mostly walt_disabled = 0;
+
+static unsigned int max_possible_efficiency = 1024;
+static unsigned int min_possible_efficiency = 1024;
+
+/*
+ * Maximum possible frequency across all cpus. Task demand and cpu
+ * capacity (cpu_power) metrics are scaled in reference to it.
+ */
+static unsigned int max_possible_freq = 1;
+
+/*
+ * Minimum possible max_freq across all cpus. This will be same as
+ * max_possible_freq on homogeneous systems and could be different from
+ * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
+ * capacity (cpu_power) of cpus.
+ */
+static unsigned int min_max_freq = 1;
+
+static unsigned int max_capacity = 1024;
+static unsigned int min_capacity = 1024;
+static unsigned int max_load_scale_factor = 1024;
+static unsigned int max_possible_capacity = 1024;
+
+/* Mask of all CPUs that have max_possible_capacity */
+static cpumask_t mpc_mask = CPU_MASK_ALL;
+
+/* Window size (in ns) */
+__read_mostly unsigned int walt_ravg_window = 20000000;
+
+/* Min window size (in ns) = 10ms */
+#define MIN_SCHED_RAVG_WINDOW 10000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+static unsigned int sync_cpu;
+static ktime_t ktime_last;
+static bool walt_ktime_suspended;
+
+static unsigned int task_load(struct task_struct *p)
+{
+       return p->ravg.demand;
+}
+
+void
+walt_inc_cumulative_runnable_avg(struct rq *rq,
+                                struct task_struct *p)
+{
+       rq->cumulative_runnable_avg += p->ravg.demand;
+}
+
+void
+walt_dec_cumulative_runnable_avg(struct rq *rq,
+                                struct task_struct *p)
+{
+       rq->cumulative_runnable_avg -= p->ravg.demand;
+       BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+}
+
+static void
+fixup_cumulative_runnable_avg(struct rq *rq,
+                             struct task_struct *p, s64 task_load_delta)
+{
+       rq->cumulative_runnable_avg += task_load_delta;
+       if ((s64)rq->cumulative_runnable_avg < 0)
+               panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
+                       task_load_delta, task_load(p));
+}
+
+u64 walt_ktime_clock(void)
+{
+       if (unlikely(walt_ktime_suspended))
+               return ktime_to_ns(ktime_last);
+       return ktime_get_ns();
+}
+
+static void walt_resume(void)
+{
+       walt_ktime_suspended = false;
+}
+
+static int walt_suspend(void)
+{
+       ktime_last = ktime_get();
+       walt_ktime_suspended = true;
+       return 0;
+}
+
+static struct syscore_ops walt_syscore_ops = {
+       .resume = walt_resume,
+       .suspend = walt_suspend
+};
+
+static int __init walt_init_ops(void)
+{
+       register_syscore_ops(&walt_syscore_ops);
+       return 0;
+}
+late_initcall(walt_init_ops);
+
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
+               struct task_struct *p)
+{
+       cfs_rq->cumulative_runnable_avg += p->ravg.demand;
+}
+
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
+               struct task_struct *p)
+{
+       cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
+}
+
+static int exiting_task(struct task_struct *p)
+{
+       if (p->flags & PF_EXITING) {
+               if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
+                       p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+               }
+               return 1;
+       }
+       return 0;
+}
+
+static int __init set_walt_ravg_window(char *str)
+{
+       get_option(&str, &walt_ravg_window);
+
+       walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
+                               walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
+       return 0;
+}
+
+early_param("walt_ravg_window", set_walt_ravg_window);
+
+static void
+update_window_start(struct rq *rq, u64 wallclock)
+{
+       s64 delta;
+       int nr_windows;
+
+       delta = wallclock - rq->window_start;
+       /* If the MPM global timer is cleared, set delta to 0 to avoid a kernel BUG */
+       if (delta < 0) {
+               delta = 0;
+               WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
+       }
+
+       if (delta < walt_ravg_window)
+               return;
+
+       nr_windows = div64_u64(delta, walt_ravg_window);
+       rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
+}
+
+static u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+       unsigned int cur_freq = rq->cur_freq;
+       int sf;
+
+       if (unlikely(cur_freq > max_possible_freq))
+               cur_freq = rq->max_possible_freq;
+
+       /* round up div64 */
+       delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
+                         max_possible_freq);
+
+       sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
+
+       delta *= sf;
+       delta >>= 10;
+
+       return delta;
+}
+
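A worked example of the frequency/IPC scaling performed by scale_exec_time(), with illustrative values:

/*
 * delta = 5 ms, cur_freq = 1000 MHz, max_possible_freq = 2000 MHz
 *   -> delta = 5 ms * 1000 / 2000             = 2.5 ms  (frequency scaling)
 * efficiency = 512, max_possible_efficiency = 1024
 *   -> sf    = DIV_ROUND_UP(512 * 1024, 1024) = 512
 *   -> delta = (2.5 ms * 512) >> 10           = 1.25 ms of full-speed time
 */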
+static int cpu_is_waiting_on_io(struct rq *rq)
+{
+       if (!walt_io_is_busy)
+               return 0;
+
+       return atomic_read(&rq->nr_iowait);
+}
+
+void walt_account_irqtime(int cpu, struct task_struct *curr,
+                                u64 delta, u64 wallclock)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags, nr_windows;
+       u64 cur_jiffies_ts;
+
+       raw_spin_lock_irqsave(&rq->lock, flags);
+
+       /*
+        * cputime (wallclock) uses sched_clock so use the same here for
+        * consistency.
+        */
+       delta += sched_clock() - wallclock;
+       cur_jiffies_ts = get_jiffies_64();
+
+       if (is_idle_task(curr))
+               walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
+                                delta);
+
+       nr_windows = cur_jiffies_ts - rq->irqload_ts;
+
+       if (nr_windows) {
+               if (nr_windows < 10) {
+                       /* Decay CPU's irqload by 3/4 for each window. */
+                       rq->avg_irqload *= (3 * nr_windows);
+                       rq->avg_irqload = div64_u64(rq->avg_irqload,
+                                                   4 * nr_windows);
+               } else {
+                       rq->avg_irqload = 0;
+               }
+               rq->avg_irqload += rq->cur_irqload;
+               rq->cur_irqload = 0;
+       }
+
+       rq->cur_irqload += delta;
+       rq->irqload_ts = cur_jiffies_ts;
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+
+#define WALT_HIGH_IRQ_TIMEOUT 3
+
+u64 walt_irqload(int cpu) {
+       struct rq *rq = cpu_rq(cpu);
+       s64 delta;
+       delta = get_jiffies_64() - rq->irqload_ts;
+
+        /*
+        * Current context can be preempted by irq and rq->irqload_ts can be
+        * updated by irq context so that delta can be negative.
+        * But this is okay and we can safely return as this means there
+        * was a recent irq occurrence.
+        */
+
+        if (delta < WALT_HIGH_IRQ_TIMEOUT)
+               return rq->avg_irqload;
+        else
+               return 0;
+}
+
+int walt_cpu_high_irqload(int cpu) {
+       return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
+}
+
+static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+                                    u64 irqtime, int event)
+{
+       if (is_idle_task(p)) {
+               /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
+               if (event == PICK_NEXT_TASK)
+                       return 0;
+
+               /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
+               return irqtime || cpu_is_waiting_on_io(rq);
+       }
+
+       if (event == TASK_WAKE)
+               return 0;
+
+       if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
+                                        event == TASK_UPDATE)
+               return 1;
+
+       /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
+       return walt_freq_account_wait_time;
+}
+
+/*
+ * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+ */
+static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+            int event, u64 wallclock, u64 irqtime)
+{
+       int new_window, nr_full_windows = 0;
+       int p_is_curr_task = (p == rq->curr);
+       u64 mark_start = p->ravg.mark_start;
+       u64 window_start = rq->window_start;
+       u32 window_size = walt_ravg_window;
+       u64 delta;
+
+       new_window = mark_start < window_start;
+       if (new_window) {
+               nr_full_windows = div64_u64((window_start - mark_start),
+                                               window_size);
+               if (p->ravg.active_windows < USHRT_MAX)
+                       p->ravg.active_windows++;
+       }
+
+       /* Handle per-task window rollover. We don't care about the idle
+        * task or exiting tasks. */
+       if (new_window && !is_idle_task(p) && !exiting_task(p)) {
+               u32 curr_window = 0;
+
+               if (!nr_full_windows)
+                       curr_window = p->ravg.curr_window;
+
+               p->ravg.prev_window = curr_window;
+               p->ravg.curr_window = 0;
+       }
+
+       if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
+               /* account_busy_for_cpu_time() = 0, so no update to the
+                * task's current window needs to be made. This could be
+                * for example
+                *
+                *   - a wakeup event on a task within the current
+                *     window (!new_window below, no action required),
+                *   - switching to a new task from idle (PICK_NEXT_TASK)
+                *     in a new window where irqtime is 0 and we aren't
+                *     waiting on IO */
+
+               if (!new_window)
+                       return;
+
+               /* A new window has started. The RQ demand must be rolled
+                * over if p is the current task. */
+               if (p_is_curr_task) {
+                       u64 prev_sum = 0;
+
+                       /* p is either idle task or an exiting task */
+                       if (!nr_full_windows) {
+                               prev_sum = rq->curr_runnable_sum;
+                       }
+
+                       rq->prev_runnable_sum = prev_sum;
+                       rq->curr_runnable_sum = 0;
+               }
+
+               return;
+       }
+
+       if (!new_window) {
+               /* account_busy_for_cpu_time() = 1 so busy time needs
+                * to be accounted to the current window. No rollover
+                * since we didn't start a new window. An example of this is
+                * when a task starts execution and then sleeps within the
+                * same window. */
+
+               if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
+                       delta = wallclock - mark_start;
+               else
+                       delta = irqtime;
+               delta = scale_exec_time(delta, rq);
+               rq->curr_runnable_sum += delta;
+               if (!is_idle_task(p) && !exiting_task(p))
+                       p->ravg.curr_window += delta;
+
+               return;
+       }
+
+       if (!p_is_curr_task) {
+               /* account_busy_for_cpu_time() = 1 so busy time needs
+                * to be accounted to the current window. A new window
+                * has also started, but p is not the current task, so the
+                * window is not rolled over - just split up and account
+                * as necessary into curr and prev. The window is only
+                * rolled over when a new window is processed for the current
+                * task.
+                *
+                * Irqtime can't be accounted by a task that isn't the
+                * currently running task. */
+
+               if (!nr_full_windows) {
+                       /* A full window hasn't elapsed, account partial
+                        * contribution to previous completed window. */
+                       delta = scale_exec_time(window_start - mark_start, rq);
+                       if (!exiting_task(p))
+                               p->ravg.prev_window += delta;
+               } else {
+                       /* Since at least one full window has elapsed,
+                        * the contribution to the previous window is the
+                        * full window (window_size). */
+                       delta = scale_exec_time(window_size, rq);
+                       if (!exiting_task(p))
+                               p->ravg.prev_window = delta;
+               }
+               rq->prev_runnable_sum += delta;
+
+               /* Account piece of busy time in the current window. */
+               delta = scale_exec_time(wallclock - window_start, rq);
+               rq->curr_runnable_sum += delta;
+               if (!exiting_task(p))
+                       p->ravg.curr_window = delta;
+
+               return;
+       }
+
+       if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
+               /* account_busy_for_cpu_time() = 1 so busy time needs
+                * to be accounted to the current window. A new window
+                * has started and p is the current task so rollover is
+                * needed. If any of these three above conditions are true
+                * then this busy time can't be accounted as irqtime.
+                *
+                * Busy time for the idle task or exiting tasks need not
+                * be accounted.
+                *
+                * An example of this would be a task that starts execution
+                * and then sleeps once a new window has begun. */
+
+               if (!nr_full_windows) {
+                       /* A full window hasn't elapsed, account partial
+                        * contribution to previous completed window. */
+                       delta = scale_exec_time(window_start - mark_start, rq);
+                       if (!is_idle_task(p) && !exiting_task(p))
+                               p->ravg.prev_window += delta;
+
+                       delta += rq->curr_runnable_sum;
+               } else {
+                       /* Since at least one full window has elapsed,
+                        * the contribution to the previous window is the
+                        * full window (window_size). */
+                       delta = scale_exec_time(window_size, rq);
+                       if (!is_idle_task(p) && !exiting_task(p))
+                               p->ravg.prev_window = delta;
+
+               }
+               /*
+                * Rollover for normal runnable sum is done here by overwriting
+                * the values in prev_runnable_sum and curr_runnable_sum.
+                * Rollover for new task runnable sum has completed by previous
+                * if-else statement.
+                */
+               rq->prev_runnable_sum = delta;
+
+               /* Account piece of busy time in the current window. */
+               delta = scale_exec_time(wallclock - window_start, rq);
+               rq->curr_runnable_sum = delta;
+               if (!is_idle_task(p) && !exiting_task(p))
+                       p->ravg.curr_window = delta;
+
+               return;
+       }
+
+       if (irqtime) {
+               /* account_busy_for_cpu_time() = 1 so busy time needs
+                * to be accounted to the current window. A new window
+                * has started and p is the current task so rollover is
+                * needed. The current task must be the idle task because
+                * irqtime is not accounted for any other task.
+                *
+                * Irqtime will be accounted each time we process IRQ activity
+                * after a period of idleness, so we know the IRQ busy time
+                * started at wallclock - irqtime. */
+
+               BUG_ON(!is_idle_task(p));
+               mark_start = wallclock - irqtime;
+
+               /* Roll window over. If IRQ busy time was just in the current
+                * window then that is all that need be accounted. */
+               rq->prev_runnable_sum = rq->curr_runnable_sum;
+               if (mark_start > window_start) {
+                       rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
+                       return;
+               }
+
+               /* The IRQ busy time spanned multiple windows. Process the
+                * busy time preceding the current window start first. */
+               delta = window_start - mark_start;
+               if (delta > window_size)
+                       delta = window_size;
+               delta = scale_exec_time(delta, rq);
+               rq->prev_runnable_sum += delta;
+
+               /* Process the remaining IRQ busy time in the current window. */
+               delta = wallclock - window_start;
+               rq->curr_runnable_sum = scale_exec_time(delta, rq);
+
+               return;
+       }
+
+       BUG();
+}
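/*
 * Worked example for the irqtime path above (illustrative numbers only,
 * with scale_exec_time() treated as the identity for simplicity):
 *
 *   window_start = 160ms, wallclock = 164ms, idle CPU.
 *
 *   irqtime = 2ms -> mark_start = 162ms > window_start, so after the
 *   rollover (prev_runnable_sum = curr_runnable_sum) the whole 2ms lands
 *   in curr_runnable_sum.
 *
 *   irqtime = 7ms -> mark_start = 157ms < window_start. The 3ms that
 *   precede window_start (capped at one window_size) are added to
 *   prev_runnable_sum after the rollover, and curr_runnable_sum becomes
 *   wallclock - window_start = 4ms.
 */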
+
+static int account_busy_for_task_demand(struct task_struct *p, int event)
+{
+       /* No need to bother updating task demand for exiting tasks
+        * or the idle task. */
+       if (exiting_task(p) || is_idle_task(p))
+               return 0;
+
+       /* When a task is waking up it is completing a segment of non-busy
+        * time. Likewise, if wait time is not treated as busy time, then
+        * when a task begins to run or is migrated, it is not running and
+        * is completing a segment of non-busy time. */
+       if (event == TASK_WAKE || (!walt_account_wait_time &&
+                        (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Called when new window is starting for a task, to record cpu usage over
+ * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
+ * when, say, a real-time task runs without preemption for several windows at a
+ * stretch.
+ */
+static void update_history(struct rq *rq, struct task_struct *p,
+                        u32 runtime, int samples, int event)
+{
+       u32 *hist = &p->ravg.sum_history[0];
+       int ridx, widx;
+       u32 max = 0, avg, demand;
+       u64 sum = 0;
+
+       /* Ignore windows where task had no activity */
+       if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+                       goto done;
+
+       /* Push new 'runtime' value onto stack */
+       widx = walt_ravg_hist_size - 1;
+       ridx = widx - samples;
+       for (; ridx >= 0; --widx, --ridx) {
+               hist[widx] = hist[ridx];
+               sum += hist[widx];
+               if (hist[widx] > max)
+                       max = hist[widx];
+       }
+
+       for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
+               hist[widx] = runtime;
+               sum += hist[widx];
+               if (hist[widx] > max)
+                       max = hist[widx];
+       }
+
+       p->ravg.sum = 0;
+
+       if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
+               demand = runtime;
+       } else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
+               demand = max;
+       } else {
+               avg = div64_u64(sum, walt_ravg_hist_size);
+               if (walt_window_stats_policy == WINDOW_STATS_AVG)
+                       demand = avg;
+               else
+                       demand = max(avg, runtime);
+       }
+
+       /*
+        * A throttled deadline sched class task gets dequeued without
+        * changing p->on_rq. Since the dequeue decrements hmp stats
+        * avoid decrementing it here again.
+        */
+       if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+                                               !p->dl.dl_throttled))
+               fixup_cumulative_runnable_avg(rq, p, demand);
+
+       p->ravg.demand = demand;
+
+done:
+       trace_walt_update_history(rq, p, runtime, samples, event);
+       return;
+}
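/*
 * A small userspace sketch of the demand-policy selection in
 * update_history() above. POLICY_RECENT/MAX/AVG correspond to the
 * WINDOW_STATS_RECENT/MAX/AVG cases in the patch; the fourth case mirrors
 * the final else branch (max of average and most recent runtime). The
 * history values, the 5-entry size and the enum encoding are illustrative
 * only, not taken from the patch.
 */
#include <stdio.h>
#include <stdint.h>

enum policy { POLICY_RECENT, POLICY_MAX, POLICY_AVG, POLICY_MAX_RECENT_AVG };

#define HIST_SIZE 5

static uint32_t pick_demand(const uint32_t *hist, uint32_t runtime,
			    enum policy policy)
{
	uint64_t sum = 0;
	uint32_t max = 0, avg;
	int i;

	for (i = 0; i < HIST_SIZE; i++) {
		sum += hist[i];
		if (hist[i] > max)
			max = hist[i];
	}
	avg = sum / HIST_SIZE;

	switch (policy) {
	case POLICY_RECENT:
		return runtime;
	case POLICY_MAX:
		return max;
	case POLICY_AVG:
		return avg;
	default:
		/* max of average and most recent runtime */
		return avg > runtime ? avg : runtime;
	}
}

int main(void)
{
	/* most recent window first, in scaled ns (made-up numbers) */
	uint32_t hist[HIST_SIZE] = { 4000, 9000, 2000, 6000, 3000 };

	printf("recent=%u max=%u avg=%u max_recent_avg=%u\n",
	       pick_demand(hist, hist[0], POLICY_RECENT),
	       pick_demand(hist, hist[0], POLICY_MAX),
	       pick_demand(hist, hist[0], POLICY_AVG),
	       pick_demand(hist, hist[0], POLICY_MAX_RECENT_AVG));
	return 0;
}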
+
+static void add_to_task_demand(struct rq *rq, struct task_struct *p,
+                               u64 delta)
+{
+       delta = scale_exec_time(delta, rq);
+       p->ravg.sum += delta;
+       if (unlikely(p->ravg.sum > walt_ravg_window))
+               p->ravg.sum = walt_ravg_window;
+}
+
+/*
+ * Account cpu demand of task and/or update task's cpu demand history
+ *
+ * ms = p->ravg.mark_start;
+ * wc = wallclock
+ * ws = rq->window_start
+ *
+ * Three possibilities:
+ *
+ *     a) Task event is contained within one window.
+ *             window_start < mark_start < wallclock
+ *
+ *             ws   ms  wc
+ *             |    |   |
+ *             V    V   V
+ *             |---------------|
+ *
+ *     In this case, p->ravg.sum is updated *iff* event is appropriate
+ *     (ex: event == PUT_PREV_TASK)
+ *
+ *     b) Task event spans two windows.
+ *             mark_start < window_start < wallclock
+ *
+ *             ms   ws   wc
+ *             |    |    |
+ *             V    V    V
+ *             -----|-------------------
+ *
+ *     In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+ *     is appropriate, then a new window sample is recorded followed
+ *     by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+ *
+ *     c) Task event spans more than two windows.
+ *
+ *             ms ws_tmp                          ws  wc
+ *             |  |                               |   |
+ *             V  V                               V   V
+ *             ---|-------|-------|-------|-------|------
+ *                |                               |
+ *                |<------ nr_full_windows ------>|
+ *
+ *     In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
+ *     event is appropriate, window sample of p->ravg.sum is recorded,
+ *     'nr_full_windows' samples of window_size are also recorded *iff*
+ *     event is appropriate and finally p->ravg.sum is set to (wc - ws)
+ *     *iff* event is appropriate.
+ *
+ * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
+ * depends on it!
+ */
+static void update_task_demand(struct task_struct *p, struct rq *rq,
+            int event, u64 wallclock)
+{
+       u64 mark_start = p->ravg.mark_start;
+       u64 delta, window_start = rq->window_start;
+       int new_window, nr_full_windows;
+       u32 window_size = walt_ravg_window;
+
+       new_window = mark_start < window_start;
+       if (!account_busy_for_task_demand(p, event)) {
+               if (new_window)
+                       /* If the time accounted isn't being accounted as
+                        * busy time, and a new window started, only the
+                        * previous window need be closed out with the
+                        * pre-existing demand. Multiple windows may have
+                        * elapsed, but since empty windows are dropped,
+                        * it is not necessary to account those. */
+                       update_history(rq, p, p->ravg.sum, 1, event);
+               return;
+       }
+
+       if (!new_window) {
+               /* The simple case - busy time contained within the existing
+                * window. */
+               add_to_task_demand(rq, p, wallclock - mark_start);
+               return;
+       }
+
+       /* Busy time spans at least two windows. Temporarily rewind
+        * window_start to first window boundary after mark_start. */
+       delta = window_start - mark_start;
+       nr_full_windows = div64_u64(delta, window_size);
+       window_start -= (u64)nr_full_windows * (u64)window_size;
+
+       /* Process (window_start - mark_start) first */
+       add_to_task_demand(rq, p, window_start - mark_start);
+
+       /* Push new sample(s) into task's demand history */
+       update_history(rq, p, p->ravg.sum, 1, event);
+       if (nr_full_windows)
+               update_history(rq, p, scale_exec_time(window_size, rq),
+                              nr_full_windows, event);
+
+       /* Roll window_start back to current to process any remainder
+        * in current window. */
+       window_start += (u64)nr_full_windows * (u64)window_size;
+
+       /* Process (wallclock - window_start) next */
+       mark_start = window_start;
+       add_to_task_demand(rq, p, wallclock - mark_start);
+}
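/*
 * Worked example for case (c) above, with walt_ravg_window = 20ms and
 * scale_exec_time() treated as the identity (numbers are illustrative):
 *
 *   mark_start = 95ms, rq->window_start = 160ms, wallclock = 167ms.
 *
 *   delta = 160 - 95 = 65ms, nr_full_windows = 65 / 20 = 3,
 *   ws_tmp = 160 - 3 * 20 = 100ms.
 *
 *   1) add_to_task_demand(100 - 95 = 5ms) tops up p->ravg.sum;
 *   2) update_history() records that sum as one sample;
 *   3) update_history() records three more samples of 20ms each;
 *   4) p->ravg.sum restarts with wallclock - window_start = 7ms.
 */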
+
+/* Reflect task activity on its demand and cpu's busy time statistics */
+void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
+            int event, u64 wallclock, u64 irqtime)
+{
+       if (walt_disabled || !rq->window_start)
+               return;
+
+       lockdep_assert_held(&rq->lock);
+
+       update_window_start(rq, wallclock);
+
+       if (!p->ravg.mark_start)
+               goto done;
+
+       update_task_demand(p, rq, event, wallclock);
+       update_cpu_busy_time(p, rq, event, wallclock, irqtime);
+
+done:
+       trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);
+
+       p->ravg.mark_start = wallclock;
+}
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+       return SCHED_LOAD_SCALE;
+}
+
+void walt_init_cpu_efficiency(void)
+{
+       int i, efficiency;
+       unsigned int max = 0, min = UINT_MAX;
+
+       for_each_possible_cpu(i) {
+               efficiency = arch_get_cpu_efficiency(i);
+               cpu_rq(i)->efficiency = efficiency;
+
+               if (efficiency > max)
+                       max = efficiency;
+               if (efficiency < min)
+                       min = efficiency;
+       }
+
+       if (max)
+               max_possible_efficiency = max;
+
+       if (min)
+               min_possible_efficiency = min;
+}
+
+static void reset_task_stats(struct task_struct *p)
+{
+       u32 sum = 0;
+
+       if (exiting_task(p))
+               sum = EXITING_TASK_MARKER;
+
+       memset(&p->ravg, 0, sizeof(struct ravg));
+       /* Retain EXITING_TASK marker */
+       p->ravg.sum_history[0] = sum;
+}
+
+void walt_mark_task_starting(struct task_struct *p)
+{
+       u64 wallclock;
+       struct rq *rq = task_rq(p);
+
+       if (!rq->window_start) {
+               reset_task_stats(p);
+               return;
+       }
+
+       wallclock = walt_ktime_clock();
+       p->ravg.mark_start = wallclock;
+}
+
+void walt_set_window_start(struct rq *rq)
+{
+       int cpu = cpu_of(rq);
+       struct rq *sync_rq = cpu_rq(sync_cpu);
+
+       if (rq->window_start)
+               return;
+
+       if (cpu == sync_cpu) {
+               rq->window_start = walt_ktime_clock();
+       } else {
+               raw_spin_unlock(&rq->lock);
+               double_rq_lock(rq, sync_rq);
+               rq->window_start = cpu_rq(sync_cpu)->window_start;
+               rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+               raw_spin_unlock(&sync_rq->lock);
+       }
+
+       rq->curr->ravg.mark_start = rq->window_start;
+}
+
+void walt_migrate_sync_cpu(int cpu)
+{
+       if (cpu == sync_cpu)
+               sync_cpu = smp_processor_id();
+}
+
+void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+       struct rq *src_rq = task_rq(p);
+       struct rq *dest_rq = cpu_rq(new_cpu);
+       u64 wallclock;
+
+       if (!p->on_rq && p->state != TASK_WAKING)
+               return;
+
+       if (exiting_task(p)) {
+               return;
+       }
+
+       if (p->state == TASK_WAKING)
+               double_rq_lock(src_rq, dest_rq);
+
+       wallclock = walt_ktime_clock();
+
+       walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
+                       TASK_UPDATE, wallclock, 0);
+       walt_update_task_ravg(dest_rq->curr, dest_rq,
+                       TASK_UPDATE, wallclock, 0);
+
+       walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
+
+       if (p->ravg.curr_window) {
+               src_rq->curr_runnable_sum -= p->ravg.curr_window;
+               dest_rq->curr_runnable_sum += p->ravg.curr_window;
+       }
+
+       if (p->ravg.prev_window) {
+               src_rq->prev_runnable_sum -= p->ravg.prev_window;
+               dest_rq->prev_runnable_sum += p->ravg.prev_window;
+       }
+
+       if ((s64)src_rq->prev_runnable_sum < 0) {
+               src_rq->prev_runnable_sum = 0;
+               WARN_ON(1);
+       }
+       if ((s64)src_rq->curr_runnable_sum < 0) {
+               src_rq->curr_runnable_sum = 0;
+               WARN_ON(1);
+       }
+
+       trace_walt_migration_update_sum(src_rq, p);
+       trace_walt_migration_update_sum(dest_rq, p);
+
+       if (p->state == TASK_WAKING)
+               double_rq_unlock(src_rq, dest_rq);
+}
+
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static void __update_min_max_capacity(void)
+{
+       int i;
+       int max = 0, min = INT_MAX;
+
+       for_each_online_cpu(i) {
+               if (cpu_rq(i)->capacity > max)
+                       max = cpu_rq(i)->capacity;
+               if (cpu_rq(i)->capacity < min)
+                       min = cpu_rq(i)->capacity;
+       }
+
+       max_capacity = max;
+       min_capacity = min;
+}
+
+static void update_min_max_capacity(void)
+{
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+       for_each_possible_cpu(i)
+               raw_spin_lock(&cpu_rq(i)->lock);
+
+       __update_min_max_capacity();
+
+       for_each_possible_cpu(i)
+               raw_spin_unlock(&cpu_rq(i)->lock);
+       local_irq_restore(flags);
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long capacity_scale_cpu_efficiency(int cpu)
+{
+       return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(int cpu)
+{
+       return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static unsigned long load_scale_cpu_efficiency(int cpu)
+{
+       return DIV_ROUND_UP(1024 * max_possible_efficiency,
+                           cpu_rq(cpu)->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static unsigned long load_scale_cpu_freq(int cpu)
+{
+       return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
+}
+
+static int compute_capacity(int cpu)
+{
+       int capacity = 1024;
+
+       capacity *= capacity_scale_cpu_efficiency(cpu);
+       capacity >>= 10;
+
+       capacity *= capacity_scale_cpu_freq(cpu);
+       capacity >>= 10;
+
+       return capacity;
+}
+
+static int compute_load_scale_factor(int cpu)
+{
+       int load_scale = 1024;
+
+       /*
+        * load_scale_factor accounts for the fact that task load
+        * is in reference to "best" performing cpu. Task's load will need to be
+        * scaled (up) by a factor to determine suitability to be placed on a
+        * (little) cpu.
+        */
+       load_scale *= load_scale_cpu_efficiency(cpu);
+       load_scale >>= 10;
+
+       load_scale *= load_scale_cpu_freq(cpu);
+       load_scale >>= 10;
+
+       return load_scale;
+}
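/*
 * Worked example for compute_capacity() / compute_load_scale_factor(),
 * using two illustrative CPU types:
 *
 *   little: efficiency 1024, max_freq 1.0GHz
 *   big:    efficiency 2048, max_freq 2.0GHz
 *
 *   => min_possible_efficiency = 1024, max_possible_efficiency = 2048,
 *      min_max_freq = 1.0GHz, max_possible_freq = 2.0GHz.
 *
 *   capacity:          little = 1024, big = (2048 * 2048) >> 10 = 4096
 *   load_scale_factor: little = 4096, big = 1024
 *
 * i.e. the least capable CPU is pinned at a capacity of 1024 and the most
 * capable CPU at a load_scale_factor of 1024 (representing 1).
 */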
+
+static int cpufreq_notifier_policy(struct notifier_block *nb,
+               unsigned long val, void *data)
+{
+       struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+       int i, update_max = 0;
+       u64 highest_mpc = 0, highest_mplsf = 0;
+       const struct cpumask *cpus = policy->related_cpus;
+       unsigned int orig_min_max_freq = min_max_freq;
+       unsigned int orig_max_possible_freq = max_possible_freq;
+       /* Initialized to policy->max in case policy->related_cpus is empty! */
+       unsigned int orig_max_freq = policy->max;
+
+       if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
+                                               val != CPUFREQ_CREATE_POLICY)
+               return 0;
+
+       if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
+               update_min_max_capacity();
+               return 0;
+       }
+
+       for_each_cpu(i, policy->related_cpus) {
+               cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
+                            policy->related_cpus);
+               orig_max_freq = cpu_rq(i)->max_freq;
+               cpu_rq(i)->min_freq = policy->min;
+               cpu_rq(i)->max_freq = policy->max;
+               cpu_rq(i)->cur_freq = policy->cur;
+               cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
+       }
+
+       max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
+       if (min_max_freq == 1)
+               min_max_freq = UINT_MAX;
+       min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
+       BUG_ON(!min_max_freq);
+       BUG_ON(!policy->max);
+
+       /* Changes to policy other than max_freq don't require any updates */
+       if (orig_max_freq == policy->max)
+               return 0;
+
+       /*
+        * A changed min_max_freq or max_possible_freq (possible during bootup)
+        * needs to trigger re-computation of load_scale_factor and capacity for
+        * all possible cpus (even those offline). It also needs to trigger
+        * re-computation of nr_big_task count on all online cpus.
+        *
+        * A changed rq->max_freq otoh needs to trigger re-computation of
+        * load_scale_factor and capacity for just the cluster of cpus involved.
+        * Since small task definition depends on max_load_scale_factor, a
+        * changed load_scale_factor of one cluster could influence
+        * classification of tasks in another cluster. Hence a changed
+        * rq->max_freq will need to trigger re-computation of nr_big_task
+        * count on all online cpus.
+        *
+        * While it should be sufficient for nr_big_tasks to be
+        * re-computed for only online cpus, we have inadequate context
+        * information here (in policy notifier) with regard to hotplug-safety
+        * context in which notification is issued. As a result, we can't use
+        * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
+        * fixed up to issue notification always in hotplug-safe context,
+        * re-compute nr_big_task for all possible cpus.
+        */
+
+       if (orig_min_max_freq != min_max_freq ||
+               orig_max_possible_freq != max_possible_freq) {
+                       cpus = cpu_possible_mask;
+                       update_max = 1;
+       }
+
+       /*
+        * Changed load_scale_factor can trigger reclassification of tasks as
+        * big or small. Make this change "atomic" so that tasks are accounted
+        * properly due to changed load_scale_factor
+        */
+       for_each_cpu(i, cpus) {
+               struct rq *rq = cpu_rq(i);
+
+               rq->capacity = compute_capacity(i);
+               rq->load_scale_factor = compute_load_scale_factor(i);
+
+               if (update_max) {
+                       u64 mpc, mplsf;
+
+                       mpc = div_u64(((u64) rq->capacity) *
+                               rq->max_possible_freq, rq->max_freq);
+                       rq->max_possible_capacity = (int) mpc;
+
+                       mplsf = div_u64(((u64) rq->load_scale_factor) *
+                               rq->max_possible_freq, rq->max_freq);
+
+                       if (mpc > highest_mpc) {
+                               highest_mpc = mpc;
+                               cpumask_clear(&mpc_mask);
+                               cpumask_set_cpu(i, &mpc_mask);
+                       } else if (mpc == highest_mpc) {
+                               cpumask_set_cpu(i, &mpc_mask);
+                       }
+
+                       if (mplsf > highest_mplsf)
+                               highest_mplsf = mplsf;
+               }
+       }
+
+       if (update_max) {
+               max_possible_capacity = highest_mpc;
+               max_load_scale_factor = highest_mplsf;
+       }
+
+       __update_min_max_capacity();
+
+       return 0;
+}
+
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+               unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+       unsigned int cpu = freq->cpu, new_freq = freq->new;
+       unsigned long flags;
+       int i;
+
+       if (val != CPUFREQ_POSTCHANGE)
+               return 0;
+
+       BUG_ON(!new_freq);
+
+       if (cpu_rq(cpu)->cur_freq == new_freq)
+               return 0;
+
+       for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
+               struct rq *rq = cpu_rq(i);
+
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
+                                     walt_ktime_clock(), 0);
+               rq->cur_freq = new_freq;
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
+
+       return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+       .notifier_call = cpufreq_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+       .notifier_call = cpufreq_notifier_trans
+};
+
+static int register_sched_callback(void)
+{
+       int ret;
+
+       ret = cpufreq_register_notifier(&notifier_policy_block,
+                                               CPUFREQ_POLICY_NOTIFIER);
+
+       if (!ret)
+               ret = cpufreq_register_notifier(&notifier_trans_block,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+
+       return 0;
+}
+
+/*
+ * cpufreq callbacks can be registered at core_initcall or later time.
+ * Any registration done prior to that is "forgotten" by cpufreq. See
+ * initialization of variable init_cpufreq_transition_notifier_list_called
+ * for further information.
+ */
+core_initcall(register_sched_callback);
+
+void walt_init_new_task_load(struct task_struct *p)
+{
+       int i;
+       u32 init_load_windows =
+                       div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
+                          (u64)walt_ravg_window, 100);
+       u32 init_load_pct = current->init_load_pct;
+
+       p->init_load_pct = 0;
+       memset(&p->ravg, 0, sizeof(struct ravg));
+
+       if (init_load_pct) {
+               init_load_windows = div64_u64((u64)init_load_pct *
+                         (u64)walt_ravg_window, 100);
+       }
+
+       p->ravg.demand = init_load_windows;
+       for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+               p->ravg.sum_history[i] = init_load_windows;
+}
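/*
 * Worked example for walt_init_new_task_load(): with a 20ms
 * walt_ravg_window and sched_walt_init_task_load_pct = 15 (both values
 * chosen for illustration, not quoted from the patch),
 * init_load_windows = 15 * 20ms / 100 = 3ms, so a new task starts with
 * demand = 3ms and every sum_history[] slot seeded to 3ms. A non-zero
 * current->init_load_pct overrides the global percentage in the same
 * formula.
 */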
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
new file mode 100644 (file)
index 0000000..e181c87
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __WALT_H
+#define __WALT_H
+
+#ifdef CONFIG_SCHED_WALT
+
+void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+               u64 wallclock, u64 irqtime);
+void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+               struct task_struct *p);
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+               struct task_struct *p);
+void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
+void walt_init_new_task_load(struct task_struct *p);
+void walt_mark_task_starting(struct task_struct *p);
+void walt_set_window_start(struct rq *rq);
+void walt_migrate_sync_cpu(int cpu);
+void walt_init_cpu_efficiency(void);
+u64 walt_ktime_clock(void);
+void walt_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
+                                  u64 wallclock);
+
+u64 walt_irqload(int cpu);
+int walt_cpu_high_irqload(int cpu);
+
+#else /* CONFIG_SCHED_WALT */
+
+static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
+               int event, u64 wallclock, u64 irqtime) { }
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
+static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+               struct task_struct *p) { }
+static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+               struct task_struct *p) { }
+static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void walt_init_new_task_load(struct task_struct *p) { }
+static inline void walt_mark_task_starting(struct task_struct *p) { }
+static inline void walt_set_window_start(struct rq *rq) { }
+static inline void walt_migrate_sync_cpu(int cpu) { }
+static inline void walt_init_cpu_efficiency(void) { }
+static inline u64 walt_ktime_clock(void) { return 0; }
+
+#endif /* CONFIG_SCHED_WALT */
+
+extern unsigned int walt_disabled;
+
+#endif
index 78947de6f9691e898adf3803ec59fded4a0a28da..ba3ddb43dd9fe4cca48544ae127702a424adacbb 100644 (file)
@@ -41,6 +41,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2072,10 +2074,158 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
 }
 #endif
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+               struct vm_area_struct **prev,
+               unsigned long start, unsigned long end,
+               const char __user *name_addr)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int error = 0;
+       pgoff_t pgoff;
+
+       if (name_addr == vma_get_anon_name(vma)) {
+               *prev = vma;
+               goto out;
+       }
+
+       pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+       *prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+                               vma->vm_file, pgoff, vma_policy(vma),
+                               vma->vm_userfaultfd_ctx, name_addr);
+       if (*prev) {
+               vma = *prev;
+               goto success;
+       }
+
+       *prev = vma;
+
+       if (start != vma->vm_start) {
+               error = split_vma(mm, vma, start, 1);
+               if (error)
+                       goto out;
+       }
+
+       if (end != vma->vm_end) {
+               error = split_vma(mm, vma, end, 0);
+               if (error)
+                       goto out;
+       }
+
+success:
+       if (!vma->vm_file)
+               vma->anon_name = name_addr;
+
+out:
+       if (error == -ENOMEM)
+               error = -EAGAIN;
+       return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+                       unsigned long arg)
+{
+       unsigned long tmp;
+       struct vm_area_struct *vma, *prev;
+       int unmapped_error = 0;
+       int error = -EINVAL;
+
+       /*
+        * If the interval [start,end) covers some unmapped address
+        * ranges, just ignore them, but return -ENOMEM at the end.
+        * - this matches the handling in madvise.
+        */
+       vma = find_vma_prev(current->mm, start, &prev);
+       if (vma && start > vma->vm_start)
+               prev = vma;
+
+       for (;;) {
+               /* Still start < end. */
+               error = -ENOMEM;
+               if (!vma)
+                       return error;
+
+               /* Here start < (end|vma->vm_end). */
+               if (start < vma->vm_start) {
+                       unmapped_error = -ENOMEM;
+                       start = vma->vm_start;
+                       if (start >= end)
+                               return error;
+               }
+
+               /* Here vma->vm_start <= start < (end|vma->vm_end) */
+               tmp = vma->vm_end;
+               if (end < tmp)
+                       tmp = end;
+
+               /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+               error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+                               (const char __user *)arg);
+               if (error)
+                       return error;
+               start = tmp;
+               if (prev && start < prev->vm_end)
+                       start = prev->vm_end;
+               error = unmapped_error;
+               if (start >= end)
+                       return error;
+               if (prev)
+                       vma = prev->vm_next;
+               else    /* madvise_remove dropped mmap_sem */
+                       vma = find_vma(current->mm, start);
+       }
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+               unsigned long len_in, unsigned long arg)
+{
+       struct mm_struct *mm = current->mm;
+       int error;
+       unsigned long len;
+       unsigned long end;
+
+       if (start & ~PAGE_MASK)
+               return -EINVAL;
+       len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+       /* Check to see whether len was rounded up from small -ve to zero */
+       if (len_in && !len)
+               return -EINVAL;
+
+       end = start + len;
+       if (end < start)
+               return -EINVAL;
+
+       if (end == start)
+               return 0;
+
+       down_write(&mm->mmap_sem);
+
+       switch (opt) {
+       case PR_SET_VMA_ANON_NAME:
+               error = prctl_set_vma_anon_name(start, end, arg);
+               break;
+       default:
+               error = -EINVAL;
+       }
+
+       up_write(&mm->mmap_sem);
+
+       return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+               unsigned long len_in, unsigned long arg)
+{
+       return -EINVAL;
+}
+#endif
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                unsigned long, arg4, unsigned long, arg5)
 {
        struct task_struct *me = current;
+       struct task_struct *tsk;
        unsigned char comm[sizeof(me->comm)];
        long error;
 
@@ -2169,7 +2319,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                error = perf_event_task_enable();
                break;
        case PR_GET_TIMERSLACK:
-               error = current->timer_slack_ns;
+               if (current->timer_slack_ns > ULONG_MAX)
+                       error = ULONG_MAX;
+               else
+                       error = current->timer_slack_ns;
                break;
        case PR_SET_TIMERSLACK:
                if (arg2 <= 0)
@@ -2218,6 +2371,26 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
        case PR_GET_TID_ADDRESS:
                error = prctl_get_tid_address(me, (int __user **)arg2);
                break;
+       case PR_SET_TIMERSLACK_PID:
+               if (task_pid_vnr(current) != (pid_t)arg3 &&
+                               !capable(CAP_SYS_NICE))
+                       return -EPERM;
+               rcu_read_lock();
+               tsk = find_task_by_vpid((pid_t)arg3);
+               if (tsk == NULL) {
+                       rcu_read_unlock();
+                       return -EINVAL;
+               }
+               get_task_struct(tsk);
+               rcu_read_unlock();
+               if (arg2 <= 0)
+                       tsk->timer_slack_ns =
+                               tsk->default_timer_slack_ns;
+               else
+                       tsk->timer_slack_ns = arg2;
+               put_task_struct(tsk);
+               error = 0;
+               break;
        case PR_SET_CHILD_SUBREAPER:
                me->signal->is_child_subreaper = !!arg2;
                break;
@@ -2266,6 +2439,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
        case PR_GET_FP_MODE:
                error = GET_FP_MODE(me);
                break;
+       case PR_SET_VMA:
+               error = prctl_set_vma(arg2, arg3, arg4, arg5);
+               break;
        default:
                error = -EINVAL;
                break;
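/*
 * Userspace sketch of the PR_SET_VMA extension added above. It assumes the
 * PR_SET_VMA / PR_SET_VMA_ANON_NAME constants from the uapi prctl.h change
 * elsewhere in this series; the fallback values below come from that header
 * and are not defined in this excerpt. The kernel stores the user pointer
 * itself, so the name string must stay valid for the lifetime of the
 * mapping.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

static const char vma_name[] = "demo buffer";	/* must outlive the mapping */

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Names the anonymous VMA; with the matching /proc/<pid>/maps change
	 * elsewhere in this series it then shows up as "[anon:demo buffer]". */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)vma_name))
		perror("PR_SET_VMA_ANON_NAME");

	return 0;
}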
index 999e025bf68eeb2cad2ea797d369003826f4bae6..abb795e8a6f1f1e4f9c472577a3b41bdea37b6b1 100644 (file)
@@ -104,6 +104,7 @@ extern char core_pattern[];
 extern unsigned int core_pipe_limit;
 #endif
 extern int pid_max;
+extern int extra_free_kbytes;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
 extern int compat_log;
@@ -303,6 +304,64 @@ static struct ctl_table kern_table[] = {
                .extra1         = &min_sched_granularity_ns,
                .extra2         = &max_sched_granularity_ns,
        },
+       {
+               .procname       = "sched_is_big_little",
+               .data           = &sysctl_sched_is_big_little,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#ifdef CONFIG_SCHED_WALT
+       {
+               .procname       = "sched_use_walt_cpu_util",
+               .data           = &sysctl_sched_use_walt_cpu_util,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sched_use_walt_task_util",
+               .data           = &sysctl_sched_use_walt_task_util,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sched_walt_init_task_load_pct",
+               .data           = &sysctl_sched_walt_init_task_load_pct,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sched_walt_cpu_high_irqload",
+               .data           = &sysctl_sched_walt_cpu_high_irqload,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#endif
+       {
+               .procname       = "sched_sync_hint_enable",
+               .data           = &sysctl_sched_sync_hint_enable,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sched_initial_task_util",
+               .data           = &sysctl_sched_initial_task_util,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "sched_cstate_aware",
+               .data           = &sysctl_sched_cstate_aware,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "sched_wakeup_granularity_ns",
                .data           = &sysctl_sched_wakeup_granularity,
@@ -434,6 +493,21 @@ static struct ctl_table kern_table[] = {
                .extra1         = &one,
        },
 #endif
+#ifdef CONFIG_SCHED_TUNE
+       {
+               .procname       = "sched_cfs_boost",
+               .data           = &sysctl_sched_cfs_boost,
+               .maxlen         = sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+               .mode           = 0444,
+#else
+               .mode           = 0644,
+#endif
+               .proc_handler   = &sysctl_sched_cfs_boost_handler,
+               .extra1         = &zero,
+               .extra2         = &one_hundred,
+       },
+#endif
 #ifdef CONFIG_PROVE_LOCKING
        {
                .procname       = "prove_locking",
@@ -1392,6 +1466,14 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = min_free_kbytes_sysctl_handler,
                .extra1         = &zero,
        },
+       {
+               .procname       = "extra_free_kbytes",
+               .data           = &extra_free_kbytes,
+               .maxlen         = sizeof(extra_free_kbytes),
+               .mode           = 0644,
+               .proc_handler   = min_free_kbytes_sysctl_handler,
+               .extra1         = &zero,
+       },
        {
                .procname       = "percpu_pagelist_fraction",
                .data           = &percpu_pagelist_fraction,
@@ -1568,6 +1650,28 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
        },
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+       {
+               .procname       = "mmap_rnd_bits",
+               .data           = &mmap_rnd_bits,
+               .maxlen         = sizeof(mmap_rnd_bits),
+               .mode           = 0600,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&mmap_rnd_bits_min,
+               .extra2         = (void *)&mmap_rnd_bits_max,
+       },
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+       {
+               .procname       = "mmap_rnd_compat_bits",
+               .data           = &mmap_rnd_compat_bits,
+               .maxlen         = sizeof(mmap_rnd_compat_bits),
+               .mode           = 0600,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&mmap_rnd_compat_bits_min,
+               .extra2         = (void *)&mmap_rnd_compat_bits_max,
+       },
+#endif
        { }
 };
 
index 17f7bcff1e02bd6ff97c90943498557ea1a26c11..405536b22c0ccbdda2de491f57689c4af8d467f3 100644 (file)
@@ -984,7 +984,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
  *             relative (HRTIMER_MODE_REL)
  */
 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                           unsigned long delta_ns, const enum hrtimer_mode mode)
+                           u64 delta_ns, const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
@@ -1553,7 +1553,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        int ret = 0;
-       unsigned long slack;
+       u64 slack;
 
        slack = current->timer_slack_ns;
        if (dl_task(current) || rt_task(current))
@@ -1729,7 +1729,7 @@ void __init hrtimers_init(void)
  * @clock:     timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
  */
 int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
                               const enum hrtimer_mode mode, int clock)
 {
        struct hrtimer_sleeper t;
@@ -1797,7 +1797,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
  *
  * Returns 0 when the timer has expired otherwise -EINTR
  */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
                                     const enum hrtimer_mode mode)
 {
        return schedule_hrtimeout_range_clock(expires, delta, mode,
index 445601c580d69be7970314238e1c0af5680b9eda..ede4bf13d3e93649be0a474000607c6a3b898965 100644 (file)
@@ -424,6 +424,35 @@ u64 ktime_get_raw_fast_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 
+/**
+ * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ *
+ * To keep it NMI safe since we're accessing from tracing, we're not using a
+ * separate timekeeper with updates to monotonic clock and boot offset
+ * protected with seqlocks. This has the following minor side effects:
+ *
+ * (1) It's possible that a timestamp is taken after the boot offset is updated
+ * but before the timekeeper is updated. If this happens, the new boot offset
+ * is added to the old timekeeping making the clock appear to update slightly
+ * earlier:
+ *    CPU 0                                        CPU 1
+ *    timekeeping_inject_sleeptime64()
+ *    __timekeeping_inject_sleeptime(tk, delta);
+ *                                                 timestamp();
+ *    timekeeping_update(tk, TK_CLEAR_NTP...);
+ *
+ * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
+ * partially updated.  Since the tk->offs_boot update is a rare event, this
+ * should be a rare occurrence which postprocessing should be able to handle.
+ */
+u64 notrace ktime_get_boot_fast_ns(void)
+{
+       struct timekeeper *tk = &tk_core.timekeeper;
+
+       return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
 /* Suspend-time cycles value for halted fast timekeeper. */
 static cycle_t cycles_at_suspend;
 
index bbc5d1114583b21f1ab489cd40a98f3aa5ebd61d..d1798fa0c7432527cb5bde3f99455b0c28bce068 100644 (file)
@@ -1698,10 +1698,10 @@ EXPORT_SYMBOL(msleep_interruptible);
 static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
        ktime_t kmin;
-       unsigned long delta;
+       u64 delta;
 
        kmin = ktime_set(0, min * NSEC_PER_USEC);
-       delta = (max - min) * NSEC_PER_USEC;
+       delta = (u64)(max - min) * NSEC_PER_USEC;
        schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 
index e45db6b0d8784a762fb5d088fea8b731135c05ae..5f5b66a2f156c2077004edeb02c6f69312361910 100644 (file)
@@ -77,6 +77,9 @@ config EVENT_TRACING
        select CONTEXT_SWITCH_TRACER
        bool
 
+config GPU_TRACEPOINTS
+       bool
+
 config CONTEXT_SWITCH_TRACER
        bool
 
index 05ea5167e6bbaf8de9285dd6ea3ca161c08ae9e1..a9bba37fab5aec746885b403e7452fb39110b861 100644 (file)
@@ -68,6 +68,7 @@ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
diff --git a/kernel/trace/gpu-traces.c b/kernel/trace/gpu-traces.c
new file mode 100644 (file)
index 0000000..a4b3f00
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * GPU tracepoints
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_sched_switch);
+EXPORT_TRACEPOINT_SYMBOL(gpu_job_enqueue);
index 059233abcfcf8d532fa227a380f366d4a476b00a..293af3346c8c96a8672a83ac622092325db430ab 100644 (file)
@@ -890,6 +890,7 @@ static struct {
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
+       { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
 };
 
@@ -1352,6 +1353,7 @@ void tracing_reset_all_online_cpus(void)
 
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
+static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 struct saved_cmdlines_buffer {
        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -1590,7 +1592,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
        }
 
        set_cmdline(idx, tsk->comm);
-
+       saved_tgids[idx] = tsk->tgid;
        arch_spin_unlock(&trace_cmdline_lock);
 
        return 1;
@@ -1633,6 +1635,25 @@ void trace_find_cmdline(int pid, char comm[])
        preempt_enable();
 }
 
+int trace_find_tgid(int pid)
+{
+       unsigned map;
+       int tgid;
+
+       preempt_disable();
+       arch_spin_lock(&trace_cmdline_lock);
+       map = savedcmd->map_pid_to_cmdline[pid];
+       if (map != NO_CMDLINE_MAP)
+               tgid = saved_tgids[map];
+       else
+               tgid = -1;
+
+       arch_spin_unlock(&trace_cmdline_lock);
+       preempt_enable();
+
+       return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -2583,6 +2604,13 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
                    "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+       print_event_info(buf, m);
+       seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
+       seq_puts(m, "#              | |        |      |          |         |\n");
+}
+
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
        print_event_info(buf, m);
@@ -2595,6 +2623,18 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
                    "#              | |       |   ||||       |         |\n");
 }
 
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+       print_event_info(buf, m);
+       seq_puts(m, "#                                      _-----=> irqs-off\n");
+       seq_puts(m, "#                                     / _----=> need-resched\n");
+       seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
+       seq_puts(m, "#                                    || / _--=> preempt-depth\n");
+       seq_puts(m, "#                                    ||| /     delay\n");
+       seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+       seq_puts(m, "#              | |        |      |   ||||       |         |\n");
+}
+
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
@@ -2907,9 +2947,15 @@ void trace_default_header(struct seq_file *m)
        } else {
                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
                        if (trace_flags & TRACE_ITER_IRQ_INFO)
-                               print_func_help_header_irq(iter->trace_buffer, m);
+                               if (trace_flags & TRACE_ITER_TGID)
+                                       print_func_help_header_irq_tgid(iter->trace_buffer, m);
+                               else
+                                       print_func_help_header_irq(iter->trace_buffer, m);
                        else
-                               print_func_help_header(iter->trace_buffer, m);
+                               if (trace_flags & TRACE_ITER_TGID)
+                                       print_func_help_header_tgid(iter->trace_buffer, m);
+                               else
+                                       print_func_help_header(iter->trace_buffer, m);
                }
        }
 }
@@ -4160,6 +4206,50 @@ static void trace_insert_enum_map(struct module *mod,
        trace_insert_enum_map_file(mod, start, len);
 }
 
+static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+                               size_t cnt, loff_t *ppos)
+{
+       char *file_buf;
+       char *buf;
+       int len = 0;
+       int pid;
+       int i;
+
+       file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
+       if (!file_buf)
+               return -ENOMEM;
+
+       buf = file_buf;
+
+       for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
+               int tgid;
+               int r;
+
+               pid = savedcmd->map_cmdline_to_pid[i];
+               if (pid == -1 || pid == NO_CMDLINE_MAP)
+                       continue;
+
+               tgid = trace_find_tgid(pid);
+               r = sprintf(buf, "%d %d\n", pid, tgid);
+               buf += r;
+               len += r;
+       }
+
+       len = simple_read_from_buffer(ubuf, cnt, ppos,
+                                     file_buf, len);
+
+       kfree(file_buf);
+
+       return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+       .open   = tracing_open_generic,
+       .read   = tracing_saved_tgids_read,
+       .llseek = generic_file_llseek,
+};
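/*
 * Small userspace sketch that consumes the "pid tgid" lines produced by
 * tracing_saved_tgids_read() above. The tracefs path assumes the usual
 * debugfs mount point.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/saved_tgids", "r");
	int pid, tgid;

	if (!f) {
		perror("saved_tgids");
		return 1;
	}

	while (fscanf(f, "%d %d", &pid, &tgid) == 2)
		printf("pid %d belongs to tgid %d\n", pid, tgid);

	fclose(f);
	return 0;
}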
+
 static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
@@ -6790,6 +6880,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
        trace_create_file("trace_marker", 0220, d_tracer,
                          tr, &tracing_mark_fops);
 
+       trace_create_file("saved_tgids", 0444, d_tracer,
+                         tr, &tracing_saved_tgids_fops);
+
        trace_create_file("trace_clock", 0644, d_tracer, tr,
                          &trace_clock_fops);
 
index 919d9d07686f5bcaad65699f400748a4b4db8a5e..e1265f95457fb6586a7c159ad771df6dd8c58db3 100644 (file)
@@ -656,6 +656,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 extern cycle_t ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
+extern int trace_find_tgid(int pid);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -970,7 +971,8 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
                FUNCTION_FLAGS                                  \
                FGRAPH_FLAGS                                    \
                STACK_FLAGS                                     \
-               BRANCH_FLAGS
+               BRANCH_FLAGS                                    \
+               C(TGID,                 "print-tgid"),
 
 /*
  * By defining C, we can make TRACE_FLAGS a list of bit names
index a663cbb84107d71039200011e4b3ec5f897c9213..4641bdb40f8ff50aeee0f9c2d06eb462b3c19e40 100644 (file)
@@ -64,6 +64,9 @@ struct fgraph_data {
 
 #define TRACE_GRAPH_INDENT     2
 
+/* Flag options */
+#define TRACE_GRAPH_PRINT_FLAT         0x80
+
 static unsigned int max_depth;
 
 static struct tracer_opt trace_opts[] = {
@@ -87,6 +90,8 @@ static struct tracer_opt trace_opts[] = {
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
+       /* Use standard trace formatting rather than hierarchical */
+       { TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
        { } /* Empty entry */
 };
 
@@ -1165,6 +1170,9 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
        int cpu = iter->cpu;
        int ret;
 
+       if (flags & TRACE_GRAPH_PRINT_FLAT)
+               return TRACE_TYPE_UNHANDLED;
+
        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
@@ -1222,13 +1230,6 @@ print_graph_function(struct trace_iterator *iter)
        return print_graph_function_flags(iter, tracer_flags.val);
 }
 
-static enum print_line_t
-print_graph_function_event(struct trace_iterator *iter, int flags,
-                          struct trace_event *event)
-{
-       return print_graph_function(iter);
-}
-
 static void print_lat_header(struct seq_file *s, u32 flags)
 {
        static const char spaces[] = "                " /* 16 spaces */
@@ -1297,6 +1298,11 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;
 
+       if (flags & TRACE_GRAPH_PRINT_FLAT) {
+               trace_default_header(s);
+               return;
+       }
+
        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;
 
@@ -1378,19 +1384,6 @@ func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
        return 0;
 }
 
-static struct trace_event_functions graph_functions = {
-       .trace          = print_graph_function_event,
-};
-
-static struct trace_event graph_trace_entry_event = {
-       .type           = TRACE_GRAPH_ENT,
-       .funcs          = &graph_functions,
-};
-
-static struct trace_event graph_trace_ret_event = {
-       .type           = TRACE_GRAPH_RET,
-       .funcs          = &graph_functions
-};
 
 static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
@@ -1467,16 +1460,6 @@ static __init int init_graph_trace(void)
 {
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
-       if (!register_trace_event(&graph_trace_entry_event)) {
-               pr_warning("Warning: could not register graph trace events\n");
-               return 1;
-       }
-
-       if (!register_trace_event(&graph_trace_ret_event)) {
-               pr_warning("Warning: could not register graph trace events\n");
-               return 1;
-       }
-
        return register_tracer(&graph_trace);
 }
 
index 282982195e09ff40ad23f3ab1dca7e572c0ab8e6..3bc4b6de0f4d1f46983f460756b1b4b4972636a9 100644 (file)
@@ -526,11 +526,21 @@ int trace_print_context(struct trace_iterator *iter)
        unsigned long long t;
        unsigned long secs, usec_rem;
        char comm[TASK_COMM_LEN];
+       int tgid;
 
        trace_find_cmdline(entry->pid, comm);
 
-       trace_seq_printf(s, "%16s-%-5d [%03d] ",
-                              comm, entry->pid, iter->cpu);
+       trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+
+       if (tr->trace_flags & TRACE_ITER_TGID) {
+               tgid = trace_find_tgid(entry->pid);
+               if (tgid < 0)
+                       trace_seq_puts(s, "(-----) ");
+               else
+                       trace_seq_printf(s, "(%5d) ", tgid);
+       }
+
+       trace_seq_printf(s, "[%03d] ", iter->cpu);
 
        if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
                trace_print_lat_fmt(s, entry);
@@ -845,6 +855,174 @@ static struct trace_event trace_fn_event = {
        .funcs          = &trace_fn_funcs,
 };
 
+/* TRACE_GRAPH_ENT */
+static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
+                                       struct trace_event *event)
+{
+       struct trace_seq *s = &iter->seq;
+       struct ftrace_graph_ent_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       trace_seq_puts(s, "graph_ent: func=");
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       trace_seq_puts(s, "\n");
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       trace_seq_printf(&iter->seq, "%lx %d\n",
+                             field->graph_ent.func,
+                             field->graph_ent.depth);
+       if (trace_seq_has_overflowed(&iter->seq))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD(s, field->graph_ent.func);
+       SEQ_PUT_HEX_FIELD(s, field->graph_ent.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD(s, field->graph_ent.func);
+       SEQ_PUT_FIELD(s, field->graph_ent.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ent_funcs = {
+       .trace          = trace_graph_ent_trace,
+       .raw            = trace_graph_ent_raw,
+       .hex            = trace_graph_ent_hex,
+       .binary         = trace_graph_ent_bin,
+};
+
+static struct trace_event trace_graph_ent_event = {
+       .type           = TRACE_GRAPH_ENT,
+       .funcs          = &trace_graph_ent_funcs,
+};
+
+/* TRACE_GRAPH_RET */
+static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
+                                       struct trace_event *event)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       struct ftrace_graph_ret_entry *field;
+
+       trace_assign_type(field, entry);
+
+       trace_seq_puts(s, "graph_ret: func=");
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!seq_print_ip_sym(s, field->ret.func, flags))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       trace_seq_puts(s, "\n");
+       if (trace_seq_has_overflowed(s))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
+                             field->ret.func,
+                             field->ret.calltime,
+                             field->ret.rettime,
+                             field->ret.overrun,
+                             field->ret.depth);
+       if (trace_seq_has_overflowed(&iter->seq))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD(s, field->ret.func);
+       SEQ_PUT_HEX_FIELD(s, field->ret.calltime);
+       SEQ_PUT_HEX_FIELD(s, field->ret.rettime);
+       SEQ_PUT_HEX_FIELD(s, field->ret.overrun);
+       SEQ_PUT_HEX_FIELD(s, field->ret.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD(s, field->ret.func);
+       SEQ_PUT_FIELD(s, field->ret.calltime);
+       SEQ_PUT_FIELD(s, field->ret.rettime);
+       SEQ_PUT_FIELD(s, field->ret.overrun);
+       SEQ_PUT_FIELD(s, field->ret.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ret_funcs = {
+       .trace          = trace_graph_ret_trace,
+       .raw            = trace_graph_ret_raw,
+       .hex            = trace_graph_ret_hex,
+       .binary         = trace_graph_ret_bin,
+};
+
+static struct trace_event trace_graph_ret_event = {
+       .type           = TRACE_GRAPH_RET,
+       .funcs          = &trace_graph_ret_funcs,
+};
+
 /* TRACE_CTX and TRACE_WAKE */
 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
@@ -1222,6 +1400,8 @@ static struct trace_event trace_print_event = {
 
 static struct trace_event *events[] __initdata = {
        &trace_fn_event,
+       &trace_graph_ent_event,
+       &trace_graph_ret_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_stack_event,
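Because the graph entry/return events now go through the generic trace_event output helpers above, their per-format renderings would look roughly as follows (the symbol, addresses, timestamps and depth are invented, and the exact symbol form depends on the sym-offset/sym-addr trace options):

    text:  graph_ent: func=do_sys_open
           graph_ret: func=do_sys_open
    raw:   ffffffc000123456 2                          (func, depth)
           ffffffc000123456 103400211 103412950 0 2    (func, calltime, rettime, overrun, depth)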
index 198137b1cadc6a8438cf105270c0229ccc4149e5..1f1b05f5a94b8edaf014495a985168b39d73877f 100644 (file)
@@ -103,6 +103,11 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 static unsigned long soft_lockup_nmi_warn;
@@ -114,7 +119,7 @@ static unsigned long soft_lockup_nmi_warn;
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
 /*
  * We may not want to enable hard lockup detection by default in all cases,
  * for example when running the kernel as a guest on a hypervisor. In these
@@ -271,7 +276,7 @@ void touch_softlockup_watchdog_sync(void)
        __this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static bool is_hardlockup(void)
 {
@@ -285,6 +290,76 @@ static bool is_hardlockup(void)
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+       cpumask_t cpus = watchdog_cpus;
+       unsigned int next_cpu;
+
+       next_cpu = cpumask_next(cpu, &cpus);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(&cpus);
+
+       if (next_cpu == cpu)
+               return nr_cpu_ids;
+
+       return next_cpu;
+}
+
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+       unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+       if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+               return 1;
+
+       per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+       return 0;
+}
+
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+       unsigned int next_cpu;
+
+       /*
+        * Test for hardlockups every 3 samples.  The sample period is
+        *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+        *  watchdog_thresh (over by 20%).
+        */
+       if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+               return;
+
+       /* check for a hardlockup on the next cpu */
+       next_cpu = watchdog_next_cpu(smp_processor_id());
+       if (next_cpu >= nr_cpu_ids)
+               return;
+
+       smp_rmb();
+
+       if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+               per_cpu(watchdog_nmi_touch, next_cpu) = false;
+               return;
+       }
+
+       if (is_hardlockup_other_cpu(next_cpu)) {
+               /* only warn once */
+               if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+                       return;
+
+               if (hardlockup_panic)
+                       panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+               else
+                       WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+               per_cpu(hard_watchdog_warn, next_cpu) = true;
+       } else {
+               per_cpu(hard_watchdog_warn, next_cpu) = false;
+       }
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
 static int is_softlockup(unsigned long touch_ts)
 {
        unsigned long now = get_timestamp();
@@ -297,7 +372,7 @@ static int is_softlockup(unsigned long touch_ts)
        return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
@@ -360,7 +435,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
        __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -384,6 +459,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
+       /* test for hardlockups on the next cpu */
+       watchdog_check_hardlockup_other_cpu();
+
        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -561,7 +639,7 @@ static void watchdog(unsigned int cpu)
                watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -659,10 +737,45 @@ static void watchdog_nmi_disable(unsigned int cpu)
        }
 }
 
+#else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+       /*
+        * The new cpu will be marked online before the first hrtimer interrupt
+        * runs on it.  If another cpu tests for a hardlockup on the new cpu
+        * before it has run its first hrtimer, it will get a false positive.
+        * Touch the watchdog on the new cpu to delay the first check for at
+        * least 3 sampling periods to guarantee one hrtimer has run on the new
+        * cpu.
+        */
+       per_cpu(watchdog_nmi_touch, cpu) = true;
+       smp_wmb();
+       cpumask_set_cpu(cpu, &watchdog_cpus);
+       return 0;
+}
+
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+       unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+       /*
+        * Offlining this cpu will cause the cpu before this one to start
+        * checking the one after this one.  If this cpu just finished checking
+        * the next cpu and updating hrtimer_interrupts_saved, and then the
+        * previous cpu checks it within one sample period, it will trigger a
+        * false positive.  Touch the watchdog on the next cpu to prevent it.
+        */
+       if (next_cpu < nr_cpu_ids)
+               per_cpu(watchdog_nmi_touch, next_cpu) = true;
+       smp_wmb();
+       cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
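A quick check of the cadence described in the comments above, assuming the default watchdog_thresh of 10 seconds:

    sample period    = watchdog_thresh * 2 / 5 = 10 * 2 / 5 = 4 s
    cross-cpu check  = every 3rd sample        = 3 * 4 s    = 12 s  (20% over watchdog_thresh)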
index 8c15b29d5adc64214880cf7c066fb83349f612aa..63d14d9b51d889dceffc1492d4e72b220fae47cc 100644 (file)
@@ -707,15 +707,27 @@ config LOCKUP_DETECTOR
          The overhead should be minimal.  A periodic hrtimer runs to
          generate interrupts and kick the watchdog task every 4 seconds.
          An NMI is generated every 10 seconds or so to check for hardlockups.
+         If NMIs are not available on the platform, every 12 seconds the
+         hrtimer interrupt on one cpu will be used to check for hardlockups
+         on the next cpu.
 
          The frequency of hrtimer and NMI events and the soft and hard lockup
          thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
        def_bool y
        depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
        depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+       def_bool y
+       depends on LOCKUP_DETECTOR && SMP
+       depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+       def_bool y
+       depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
 config BOOTPARAM_HARDLOCKUP_PANIC
        bool "Panic (Reboot) On Hard Lockups"
        depends on HARDLOCKUP_DETECTOR
@@ -855,6 +867,15 @@ config SCHED_INFO
        bool
        default n
 
+config PANIC_ON_RT_THROTTLING
+       bool "Panic on RT throttling"
+       help
+         Say Y here to enable the kernel to panic when a realtime
+         runqueue is throttled. This may be useful for detecting
+         and debugging RT throttling issues.
+
+         Say N if unsure.
+
 config SCHEDSTATS
        bool "Collect scheduler statistics"
        depends on DEBUG_KERNEL && PROC_FS
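As a sketch of how the reshuffled symbols resolve, a hypothetical SMP platform without perf NMI support (and without HAVE_NMI_WATCHDOG) that enables the lockup detector would end up with:

    CONFIG_LOCKUP_DETECTOR=y
    # CONFIG_HARDLOCKUP_DETECTOR_NMI is not set
    CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU=y
    CONFIG_HARDLOCKUP_DETECTOR=y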
index 5a003a2ebd967cfe6a9e202ea9a1ae7f1ddf1aba..05efc1fa97f08941d763996d38ce08afbbaba459 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
+#include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -109,6 +110,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               check_object_size(dst, count, false);
                user_access_begin();
                retval = do_strncpy_from_user(dst, src, count, max);
                user_access_end();
index c889fcbb530e98d8779ef75750e1fde08bf786cf..c154e107630307715da37d290417de8e2cd8adb7 100644 (file)
@@ -104,7 +104,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx);
+                         vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index d300f1329814ba538b271fafabb0351d3182d639..07ff069fef256055f776803c708754db310d74f2 100644 (file)
@@ -822,6 +822,17 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
 }
 
+/**
+ * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
+{
+       return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
+}
 
 /**
  * __next_reserved_mem_region - next function for for_each_reserved_region()
@@ -913,6 +924,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;
 
+               /* skip nomap memory unless we were asked for it explicitly */
+               if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+                       continue;
+
                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
@@ -1022,6 +1037,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
                if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                        continue;
 
+               /* skip nomap memory unless we were asked for it explicitly */
+               if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
+                       continue;
+
                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
@@ -1519,6 +1538,15 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
        return memblock_search(&memblock.memory, addr) != -1;
 }
 
+int __init_memblock memblock_is_map_memory(phys_addr_t addr)
+{
+       int i = memblock_search(&memblock.memory, addr);
+
+       if (i == -1)
+               return false;
+       return !memblock_is_nomap(&memblock.memory.regions[i]);
+}
+
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
                         unsigned long *start_pfn, unsigned long *end_pfn)
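A rough sketch of how a caller might use the new NOMAP marking and query; the region and the caller below are invented for illustration only:

    /* Hypothetical firmware region: RAM, but must stay out of the linear map. */
    phys_addr_t fw_base = 0x80000000;
    phys_addr_t fw_size = SZ_64K;

    memblock_add(fw_base, fw_size);         /* the range is known memory ...  */
    memblock_mark_nomap(fw_base, fw_size);  /* ... but must not be mapped     */

    /* later, when deciding whether an address may be given a kernel mapping: */
    if (!memblock_is_map_memory(fw_base))
            pr_debug("RAM region is marked NOMAP\n");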
index 5d9c8a3136bc0404e086487b7b4a87a102a2fe2c..17dfe70f3309778e92d6968ed87b15d4baa7897a 100644 (file)
@@ -5352,6 +5352,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
        .css_reset = mem_cgroup_css_reset,
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
+       .attach = mem_cgroup_move_task,
        .post_attach = mem_cgroup_move_task,
        .bind = mem_cgroup_bind,
        .dfl_cftypes = memory_files,
index 87a177917cb2e60a13b09e6a53836ccd9f9275bf..f20eb4e8c4ccd4566fe8c46cdfde91ad1115c6d8 100644 (file)
@@ -720,7 +720,8 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
-                                new_pol, vma->vm_userfaultfd_ctx);
+                                new_pol, vma->vm_userfaultfd_ctx,
+                                vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
index d6006b146fea38c0f897bbcb721dec70a189c92c..d843bc9d32ddb077aa4aadfb156a4e2017ee94d5 100644 (file)
@@ -512,7 +512,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma),
-                         vma->vm_userfaultfd_ctx);
+                         vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index 455772a05e5416720babd944532004fe529be1fd..a089cca8d79aae15a561c8dba77d45b0c39f95dc 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
 #define arch_rebalance_pgtables(addr, len)             (addr)
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
+const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
+const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
+int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
+#endif
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
+const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
+int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
+#endif
+
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
@@ -925,7 +937,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                                struct file *file, unsigned long vm_flags,
-                               struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+                               struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+                               const char __user *anon_name)
 {
        /*
         * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -943,6 +956,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                return 0;
        if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
                return 0;
+       if (vma_get_anon_name(vma) != anon_name)
+               return 0;
        return 1;
 }
 
@@ -975,9 +990,10 @@ static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
                     struct anon_vma *anon_vma, struct file *file,
                     pgoff_t vm_pgoff,
-                    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+                    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+                    const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
@@ -996,9 +1012,10 @@ static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
                    struct anon_vma *anon_vma, struct file *file,
                    pgoff_t vm_pgoff,
-                   struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+                   struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+                   const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+       if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = vma_pages(vma);
@@ -1009,9 +1026,9 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1042,7 +1059,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy,
-                       struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+                       struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+                       const char __user *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
@@ -1070,7 +1088,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        mpol_equal(vma_policy(prev), policy) &&
                        can_vma_merge_after(prev, vm_flags,
                                            anon_vma, file, pgoff,
-                                           vm_userfaultfd_ctx)) {
+                                           vm_userfaultfd_ctx,
+                                           anon_name)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
@@ -1079,7 +1098,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                can_vma_merge_before(next, vm_flags,
                                                     anon_vma, file,
                                                     pgoff+pglen,
-                                                    vm_userfaultfd_ctx) &&
+                                                    vm_userfaultfd_ctx,
+                                                    anon_name) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
@@ -1101,7 +1121,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                             anon_vma, file, pgoff+pglen,
-                                            vm_userfaultfd_ctx)) {
+                                            vm_userfaultfd_ctx,
+                                            anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
@@ -1585,7 +1606,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * Can we just expand an old mapping?
         */
        vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-                       NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
+                       NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
        if (vma)
                goto out;
 
@@ -2613,6 +2634,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
        return 0;
 }
+EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2790,7 +2812,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
        /* Can we just expand an old private anonymous mapping? */
        vma = vma_merge(mm, prev, addr, addr + len, flags,
-                       NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
+                       NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
        if (vma)
                goto out;
 
@@ -2948,7 +2970,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                            vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                           vma->vm_userfaultfd_ctx);
+                           vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
index ef5be8eaab001792b469fac1bd5b43cb139d1b0b..bddb2c75492d06b4806c6ed91c0352c05a9555f3 100644 (file)
@@ -293,7 +293,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-                          vma->vm_userfaultfd_ctx);
+                          vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
index b0ca09f607b4c6bb6db5c4c54c8b28e455599121..cc99ff2d85c5bca1b12b11f128a66c9ce10f5373 100644 (file)
@@ -231,9 +231,21 @@ compound_page_dtor * const compound_page_dtors[] = {
 #endif
 };
 
+/*
+ * Try to keep at least this much lowmem free.  Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
 
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
+
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
@@ -6022,6 +6034,7 @@ static void setup_per_zone_lowmem_reserve(void)
 static void __setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+       unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
        struct zone *zone;
        unsigned long flags;
@@ -6033,11 +6046,14 @@ static void __setup_per_zone_wmarks(void)
        }
 
        for_each_zone(zone) {
-               u64 tmp;
+               u64 min, low;
 
                spin_lock_irqsave(&zone->lock, flags);
-               tmp = (u64)pages_min * zone->managed_pages;
-               do_div(tmp, lowmem_pages);
+               min = (u64)pages_min * zone->managed_pages;
+               do_div(min, lowmem_pages);
+               low = (u64)pages_low * zone->managed_pages;
+               do_div(low, vm_total_pages);
+
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -6058,11 +6074,13 @@ static void __setup_per_zone_wmarks(void)
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->watermark[WMARK_MIN] = tmp;
+                       zone->watermark[WMARK_MIN] = min;
                }
 
-               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+                                       low + (min >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+                                       low + (min >> 1);
 
                __mod_zone_page_state(zone, NR_ALLOC_BATCH,
                        high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -6185,7 +6203,7 @@ core_initcall(init_per_zone_wmark_min)
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *     that we can call two helper functions whenever min_free_kbytes
- *     changes.
+ *     or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
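To make the new watermark arithmetic concrete, a worked example with invented numbers: 4 KiB pages, a single lowmem zone owning all memory (managed_pages == lowmem_pages == vm_total_pages), min_free_kbytes = 4096 and extra_free_kbytes = 8192:

    pages_min  = 4096 >> 2 = 1024 pages          pages_low = 8192 >> 2 = 2048 pages
    min        = pages_min * managed_pages / lowmem_pages   = 1024 pages
    low        = pages_low * managed_pages / vm_total_pages = 2048 pages
    WMARK_MIN  = min                 = 1024 pages
    WMARK_LOW  = min + low + min/4   = 3328 pages
    WMARK_HIGH = min + low + min/2   = 3584 pages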
index 1b11ccc0a3b75ab5bd28aa401f2e0800870962c6..79997e8cf807c54b8a8217df6f9b70ef9f4e981f 100644 (file)
@@ -3405,6 +3405,14 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       vma->vm_file = file;
+       vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3424,10 +3432,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       if (vma->vm_file)
-               fput(vma->vm_file);
-       vma->vm_file = file;
-       vma->vm_ops = &shmem_vm_ops;
+       shmem_set_file(vma, file);
        return 0;
 }
 
index c56b97b7c49ccf579784d65cccd7908310d64c38..b34996a3860b0bad6676f26eeb38a7469cc63de4 100644 (file)
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <asm/sections.h>
 
@@ -124,7 +125,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
 static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 {
        /* Reject if object wraps past end of memory. */
-       if (ptr + n < ptr)
+       if ((unsigned long)ptr + n < (unsigned long)ptr)
                return "<wrapped address>";
 
        /* Reject if NULL or ZERO-allocation. */
index c54fd2924f25af960462e474fa3583c633f9fcc8..83a003bc3cae54e3c2b1071249a5c282f70d3220 100644 (file)
@@ -460,7 +460,7 @@ static int fold_diff(int *diff)
  *
  * The function returns the number of global counters updated.
  */
-static int refresh_cpu_vm_stats(void)
+static int refresh_cpu_vm_stats(bool do_pagesets)
 {
        struct zone *zone;
        int i;
@@ -484,33 +484,35 @@ static int refresh_cpu_vm_stats(void)
 #endif
                        }
                }
-               cond_resched();
 #ifdef CONFIG_NUMA
-               /*
-                * Deal with draining the remote pageset of this
-                * processor
-                *
-                * Check if there are pages remaining in this pageset
-                * if not then there is nothing to expire.
-                */
-               if (!__this_cpu_read(p->expire) ||
+               if (do_pagesets) {
+                       cond_resched();
+                       /*
+                        * Deal with draining the remote pageset of this
+                        * processor
+                        *
+                        * Check if there are pages remaining in this pageset
+                        * if not then there is nothing to expire.
+                        */
+                       if (!__this_cpu_read(p->expire) ||
                               !__this_cpu_read(p->pcp.count))
-                       continue;
+                               continue;
 
-               /*
-                * We never drain zones local to this processor.
-                */
-               if (zone_to_nid(zone) == numa_node_id()) {
-                       __this_cpu_write(p->expire, 0);
-                       continue;
-               }
+                       /*
+                        * We never drain zones local to this processor.
+                        */
+                       if (zone_to_nid(zone) == numa_node_id()) {
+                               __this_cpu_write(p->expire, 0);
+                               continue;
+                       }
 
-               if (__this_cpu_dec_return(p->expire))
-                       continue;
+                       if (__this_cpu_dec_return(p->expire))
+                               continue;
 
-               if (__this_cpu_read(p->pcp.count)) {
-                       drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
-                       changes++;
+                       if (__this_cpu_read(p->pcp.count)) {
+                               drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
+                               changes++;
+                       }
                }
 #endif
        }
@@ -1386,7 +1388,7 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-       if (refresh_cpu_vm_stats()) {
+       if (refresh_cpu_vm_stats(true)) {
                /*
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
@@ -1417,6 +1419,23 @@ static void vmstat_update(struct work_struct *w)
        }
 }
 
+/*
+ * Switch off vmstat processing and then fold all the remaining differentials
+ * until the diffs stay at zero. The function is used by NOHZ and can only be
+ * invoked when tick processing is not active.
+ */
+void quiet_vmstat(void)
+{
+       if (system_state != SYSTEM_RUNNING)
+               return;
+
+       do {
+               if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+                       cancel_delayed_work(this_cpu_ptr(&vmstat_work));
+
+       } while (refresh_cpu_vm_stats(false));
+}
+
 /*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
@@ -1449,7 +1468,7 @@ static bool need_update(int cpu)
  */
 static void vmstat_shepherd(struct work_struct *w);
 
-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
+static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
 
 static void vmstat_shepherd(struct work_struct *w)
 {
index 127da94ae25eb73e8ffd45e7e7dbc9e07d937033..ce9585cf343a7a3848c851c9d62ca2b7f8c39708 100644 (file)
@@ -86,6 +86,12 @@ source "net/netlabel/Kconfig"
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+       bool "Only allow certain groups to create sockets"
+       default y
+       help
+               none
+
 config NETWORK_SECMARK
        bool "Security Marking"
        help
index 70306cc9d8140f696e440de10f9fff864b0abd94..709ce9fb15f3fd1ff7711dff89a3332802db65b0 100644 (file)
@@ -106,11 +106,40 @@ void bt_sock_unregister(int proto)
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+       return !current_euid();
+}
+
+static inline int current_has_bt(void)
+{
+       return current_has_bt_admin();
+}
+# else
+static inline int current_has_bt_admin(void)
+{
+       return 1;
+}
+
+static inline int current_has_bt(void)
+{
+       return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
                          int kern)
 {
        int err;
 
+       if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+                       proto == BTPROTO_L2CAP) {
+               if (!current_has_bt())
+                       return -EPERM;
+       } else if (!current_has_bt_admin())
+               return -EPERM;
+
        if (net != &init_net)
                return -EAFNOSUPPORT;
 
index 2c8095a5d824feeba6afa186074d590fd9777b79..0346c215ff6adf581eaf78cbdbd7c295885243e3 100644 (file)
@@ -48,16 +48,17 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       u64_stats_update_begin(&brstats->syncp);
-       brstats->tx_packets++;
-       brstats->tx_bytes += skb->len;
-       u64_stats_update_end(&brstats->syncp);
-
        BR_INPUT_SKB_CB(skb)->brdev = dev;
 
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
+       u64_stats_update_begin(&brstats->syncp);
+       brstats->tx_packets++;
+       /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+       brstats->tx_bytes += skb->len;
+       u64_stats_update_end(&brstats->syncp);
+
        if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
                goto out;
 
index 365de66436aca8dba3868aa565d4cb77353b58d1..3fbd839f6d20739996773eb15c2c0c0fb028f9d9 100644 (file)
@@ -33,6 +33,8 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
        r->table = table;
        r->flags = flags;
        r->fr_net = ops->fro_net;
+       r->uid_start = INVALID_UID;
+       r->uid_end = INVALID_UID;
 
        r->suppress_prefixlen = -1;
        r->suppress_ifgroup = -1;
@@ -172,6 +174,23 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
+static inline kuid_t fib_nl_uid(struct nlattr *nla)
+{
+       return make_kuid(current_user_ns(), nla_get_u32(nla));
+}
+
+static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
+{
+       return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
+}
+
+static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
+{
+       return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
+              (uid_gte(fl->flowi_uid, rule->uid_start) &&
+               uid_lte(fl->flowi_uid, rule->uid_end));
+}
+
 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
 {
@@ -189,6 +208,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
        if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
                goto out;
 
+       if (!fib_uid_range_match(fl, rule))
+               goto out;
+
        ret = ops->match(rule, fl, flags);
 out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -371,6 +393,19 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;
 
+       /* UID start and end must either both be valid or both unspecified. */
+       rule->uid_start = rule->uid_end = INVALID_UID;
+       if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
+               if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
+                       rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
+                       rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
+               }
+               if (!uid_valid(rule->uid_start) ||
+                   !uid_valid(rule->uid_end) ||
+                   !uid_lte(rule->uid_start, rule->uid_end))
+               goto errout_free;
+       }
+
        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;
@@ -483,6 +518,14 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
                        continue;
 
+               if (tb[FRA_UID_START] &&
+                   !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
+                       continue;
+
+               if (tb[FRA_UID_END] &&
+                   !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
+                       continue;
+
                if (!ops->compare(rule, frh, tb))
                        continue;
 
@@ -549,7 +592,9 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
-                        + nla_total_size(8); /* FRA_TUN_ID */
+                        + nla_total_size(8) /* FRA_TUN_ID */
+                        + nla_total_size(4) /* FRA_UID_START */
+                        + nla_total_size(4); /* FRA_UID_END */
 
        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);
@@ -607,7 +652,11 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (rule->tun_id &&
-            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
+            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)) ||
+           (uid_valid(rule->uid_start) &&
+            nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
+           (uid_valid(rule->uid_end) &&
+            nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
                goto nla_put_failure;
 
        if (rule->suppress_ifgroup != -1) {
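The uid-range match added above reduces to an inclusive-interval test; a minimal userspace model of the same rule, using plain uid_t in place of kuid_t and NO_UID as a stand-in for the kernel's INVALID_UID, could read:

    #include <stdbool.h>
    #include <sys/types.h>

    #define NO_UID ((uid_t)-1)              /* stand-in for INVALID_UID */

    struct uid_range_rule {
            uid_t uid_start;                /* both NO_UID when no range is set */
            uid_t uid_end;
    };

    /* Mirrors fib_uid_range_match(): a rule without a uid range matches every
     * flow; otherwise the flow's uid must fall inside [uid_start, uid_end]. */
    bool uid_range_matches(const struct uid_range_rule *rule, uid_t flow_uid)
    {
            if (rule->uid_start == NO_UID && rule->uid_end == NO_UID)
                    return true;
            return flow_uid >= rule->uid_start && flow_uid <= rule->uid_end;
    }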
index 0c1d58d43f67c46c7a11081d0ecdd30cb5878529..3963c3872c69b806afe6788ec1a4e588adc9cb6e 100644 (file)
@@ -214,7 +214,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
 
-static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        int err;
        struct sock_diag_req *req = nlmsg_data(nlh);
@@ -234,8 +234,12 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        hndl = sock_diag_handlers[req->sdiag_family];
        if (hndl == NULL)
                err = -ENOENT;
-       else
+       else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
                err = hndl->dump(skb, nlh);
+       else if (nlh->nlmsg_type == SOCK_DESTROY_BACKPORT && hndl->destroy)
+               err = hndl->destroy(skb, nlh);
+       else
+               err = -EOPNOTSUPP;
        mutex_unlock(&sock_diag_table_mutex);
 
        return err;
@@ -261,7 +265,8 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
                return ret;
        case SOCK_DIAG_BY_FAMILY:
-               return __sock_diag_rcv_msg(skb, nlh);
+       case SOCK_DESTROY_BACKPORT:
+               return __sock_diag_cmd(skb, nlh);
        default:
                return -EINVAL;
        }
@@ -295,6 +300,18 @@ static int sock_diag_bind(struct net *net, int group)
        return 0;
 }
 
+int sock_diag_destroy(struct sock *sk, int err)
+{
+       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!sk->sk_prot->diag_destroy)
+               return -EOPNOTSUPP;
+
+       return sk->sk_prot->diag_destroy(sk, err);
+}
+EXPORT_SYMBOL_GPL(sock_diag_destroy);
+
 static int __net_init diag_net_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
index 416dfa004cfb1b6b7c176096b69e33b104499cf7..c22920525e5d844bd7e4210b233440b883c2681e 100644 (file)
@@ -436,6 +436,19 @@ config INET_UDP_DIAG
          Support for UDP socket monitoring interface used by the ss tool.
          If unsure, say Y.
 
+config INET_DIAG_DESTROY
+       bool "INET: allow privileged process to administratively close sockets"
+       depends on INET_DIAG
+       default n
+       ---help---
+         Provides a SOCK_DESTROY operation that allows privileged processes
+         (e.g., a connection manager or a network administration tool such as
+         ss) to close sockets opened by other processes. Closing a socket in
+         this way interrupts any blocking read/write/connect operations on
+         the socket and causes future socket calls to behave as if the socket
+         had been disconnected.
+         If unsure, say N.
+
 menuconfig TCP_CONG_ADVANCED
        bool "TCP: advanced congestion control"
        ---help---
index c29809f765dc5d4d95edd5d6ac3cc321fcb97c88..854c4bfd6eeaba6abd64a9fb90e7d99b09567f74 100644 (file)
@@ -16,6 +16,7 @@ obj-y     := route.o inetpeer.o protocol.o \
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
index afc18e9ca94adeb33d6344481839701971815cbc..68bf7bdf7fdb71a7e5abd54e8826e1cfb67e52ec 100644 (file)
 #endif
 #include <net/l3mdev.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+       return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+       return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -260,6 +273,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
        if (protocol < 0 || protocol >= IPPROTO_MAX)
                return -EINVAL;
 
+       if (!current_has_network())
+               return -EACCES;
+
        sock->state = SS_UNCONNECTED;
 
        /* Look for the requested type/protocol pair. */
@@ -308,8 +324,7 @@ lookup_protocol:
        }
 
        err = -EPERM;
-       if (sock->type == SOCK_RAW && !kern &&
-           !ns_capable(net->user_ns, CAP_NET_RAW))
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
                goto out_rcu_unlock;
 
        sock->ops = answer->ops;
index 63566ec54794db8213c2c935d75c2ba32dd71d61..98c754e61024e14f46e6112d6e1a90fb88a013f0 100644 (file)
@@ -627,6 +627,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
        [RTA_FLOW]              = { .type = NLA_U32 },
        [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
        [RTA_ENCAP]             = { .type = NLA_NESTED },
+       [RTA_UID]               = { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
index 64148914803a8443ecc0de2a45c141ae72cc0258..030cd09dd2a24d2480346a0e19fece73356c8c0e 100644 (file)
@@ -420,7 +420,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
-                          htons(ireq->ir_num));
+                          htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -457,7 +457,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
-                          htons(ireq->ir_num));
+                          htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
index ab9f8a66615d0872b586a0c2745196c58f6026be..fcb83b2a61f00681e1d918c0ec862c051d537e71 100644 (file)
@@ -44,6 +44,8 @@ struct inet_diag_entry {
        u16 dport;
        u16 family;
        u16 userlocks;
+       u32 ifindex;
+       u32 mark;
 };
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -96,6 +98,7 @@ static size_t inet_sk_attr_size(void)
                + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
                + nla_total_size(1) /* INET_DIAG_TOS */
                + nla_total_size(1) /* INET_DIAG_TCLASS */
+               + nla_total_size(4) /* INET_DIAG_MARK */
                + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
                + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -108,7 +111,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
                      struct user_namespace *user_ns,
                      u32 portid, u32 seq, u16 nlmsg_flags,
-                     const struct nlmsghdr *unlh)
+                     const struct nlmsghdr *unlh,
+                     bool net_admin)
 {
        const struct inet_sock *inet = inet_sk(sk);
        const struct tcp_congestion_ops *ca_ops;
@@ -158,6 +162,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        }
 #endif
 
+       if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
+               goto errout;
+
        r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
@@ -256,10 +263,11 @@ static int inet_csk_diag_fill(struct sock *sk,
                              const struct inet_diag_req_v2 *req,
                              struct user_namespace *user_ns,
                              u32 portid, u32 seq, u16 nlmsg_flags,
-                             const struct nlmsghdr *unlh)
+                             const struct nlmsghdr *unlh,
+                             bool net_admin)
 {
-       return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
-                                user_ns, portid, seq, nlmsg_flags, unlh);
+       return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
+                                portid, seq, nlmsg_flags, unlh, net_admin);
 }
 
 static int inet_twsk_diag_fill(struct sock *sk,
@@ -301,8 +309,9 @@ static int inet_twsk_diag_fill(struct sock *sk,
 
 static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
                              u32 portid, u32 seq, u16 nlmsg_flags,
-                             const struct nlmsghdr *unlh)
+                             const struct nlmsghdr *unlh, bool net_admin)
 {
+       struct request_sock *reqsk = inet_reqsk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
        long tmo;
@@ -316,7 +325,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
        inet_diag_msg_common_fill(r, sk);
        r->idiag_state = TCP_SYN_RECV;
        r->idiag_timer = 1;
-       r->idiag_retrans = inet_reqsk(sk)->num_retrans;
+       r->idiag_retrans = reqsk->num_retrans;
 
        BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
                     offsetof(struct sock, sk_cookie));
@@ -328,6 +337,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
        r->idiag_uid    = 0;
        r->idiag_inode  = 0;
 
+       if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+                                    inet_rsk(reqsk)->ir_mark))
+               return -EMSGSIZE;
+
        nlmsg_end(skb, nlh);
        return 0;
 }
@@ -336,7 +349,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        const struct inet_diag_req_v2 *r,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u16 nlmsg_flags,
-                       const struct nlmsghdr *unlh)
+                       const struct nlmsghdr *unlh, bool net_admin)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill(sk, skb, portid, seq,
@@ -344,46 +357,66 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
 
        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return inet_req_diag_fill(sk, skb, portid, seq,
-                                         nlmsg_flags, unlh);
+                                         nlmsg_flags, unlh, net_admin);
 
        return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
-                                 nlmsg_flags, unlh);
+                                 nlmsg_flags, unlh, net_admin);
 }
 
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
-                           struct sk_buff *in_skb,
-                           const struct nlmsghdr *nlh,
-                           const struct inet_diag_req_v2 *req)
+struct sock *inet_diag_find_one_icsk(struct net *net,
+                                    struct inet_hashinfo *hashinfo,
+                                    const struct inet_diag_req_v2 *req)
 {
-       struct net *net = sock_net(in_skb->sk);
-       struct sk_buff *rep;
        struct sock *sk;
-       int err;
 
-       err = -EINVAL;
        if (req->sdiag_family == AF_INET)
                sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (req->sdiag_family == AF_INET6)
-               sk = inet6_lookup(net, hashinfo,
-                                 (struct in6_addr *)req->id.idiag_dst,
-                                 req->id.idiag_dport,
-                                 (struct in6_addr *)req->id.idiag_src,
-                                 req->id.idiag_sport,
-                                 req->id.idiag_if);
+       else if (req->sdiag_family == AF_INET6) {
+               if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+                   ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+                       sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
+                                        req->id.idiag_dport, req->id.idiag_src[3],
+                                        req->id.idiag_sport, req->id.idiag_if);
+               else
+                       sk = inet6_lookup(net, hashinfo,
+                                         (struct in6_addr *)req->id.idiag_dst,
+                                         req->id.idiag_dport,
+                                         (struct in6_addr *)req->id.idiag_src,
+                                         req->id.idiag_sport,
+                                         req->id.idiag_if);
+       }
 #endif
        else
-               goto out_nosk;
+               return ERR_PTR(-EINVAL);
 
-       err = -ENOENT;
        if (!sk)
-               goto out_nosk;
+               return ERR_PTR(-ENOENT);
 
-       err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
-       if (err)
-               goto out;
+       if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
+               sock_gen_put(sk);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return sk;
+}
+EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
+
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+                           struct sk_buff *in_skb,
+                           const struct nlmsghdr *nlh,
+                           const struct inet_diag_req_v2 *req)
+{
+       struct net *net = sock_net(in_skb->sk);
+       struct sk_buff *rep;
+       struct sock *sk;
+       int err;
+
+       sk = inet_diag_find_one_icsk(net, hashinfo, req);
+       if (IS_ERR(sk))
+               return PTR_ERR(sk);
 
        rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
        if (!rep) {
@@ -394,7 +427,8 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
        err = sk_diag_fill(sk, rep, req,
                           sk_user_ns(NETLINK_CB(in_skb).sk),
                           NETLINK_CB(in_skb).portid,
-                          nlh->nlmsg_seq, 0, nlh);
+                          nlh->nlmsg_seq, 0, nlh,
+                          netlink_net_capable(in_skb, CAP_NET_ADMIN));
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                nlmsg_free(rep);
@@ -409,12 +443,11 @@ out:
        if (sk)
                sock_gen_put(sk);
 
-out_nosk:
        return err;
 }
 EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
 
-static int inet_diag_get_exact(struct sk_buff *in_skb,
+static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               const struct inet_diag_req_v2 *req)
 {
@@ -424,8 +457,12 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
        handler = inet_diag_lock_handler(req->sdiag_protocol);
        if (IS_ERR(handler))
                err = PTR_ERR(handler);
-       else
+       else if (cmd == SOCK_DIAG_BY_FAMILY)
                err = handler->dump_one(in_skb, nlh, req);
+       else if (cmd == SOCK_DESTROY_BACKPORT && handler->destroy)
+               err = handler->destroy(in_skb, req);
+       else
+               err = -EOPNOTSUPP;
        inet_diag_unlock_handler(handler);
 
        return err;
@@ -529,6 +566,22 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
                        yes = 0;
                        break;
                }
+               case INET_DIAG_BC_DEV_COND: {
+                       u32 ifindex;
+
+                       ifindex = *((const u32 *)(op + 1));
+                       if (ifindex != entry->ifindex)
+                               yes = 0;
+                       break;
+               }
+               case INET_DIAG_BC_MARK_COND: {
+                       struct inet_diag_markcond *cond;
+
+                       cond = (struct inet_diag_markcond *)(op + 1);
+                       if ((entry->mark & cond->mask) != cond->mark)
+                               yes = 0;
+                       break;
+               }
                }
 
                if (yes) {
@@ -571,7 +624,14 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
        entry_fill_addrs(&entry, sk);
        entry.sport = inet->inet_num;
        entry.dport = ntohs(inet->inet_dport);
+       entry.ifindex = sk->sk_bound_dev_if;
        entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
+       if (sk_fullsock(sk))
+               entry.mark = sk->sk_mark;
+       else if (sk->sk_state == TCP_NEW_SYN_RECV)
+               entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
+       else
+               entry.mark = 0;
 
        return inet_diag_bc_run(bc, &entry);
 }
@@ -594,6 +654,17 @@ static int valid_cc(const void *bc, int len, int cc)
        return 0;
 }
 
+/* data is u32 ifindex */
+static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
+                         int *min_len)
+{
+       /* Check ifindex space. */
+       *min_len += sizeof(u32);
+       if (len < *min_len)
+               return false;
+
+       return true;
+}
 /* Validate an inet_diag_hostcond. */
 static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
                           int *min_len)
@@ -643,10 +714,25 @@ static bool valid_port_comparison(const struct inet_diag_bc_op *op,
        return true;
 }
 
-static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
+static bool valid_markcond(const struct inet_diag_bc_op *op, int len,
+                          int *min_len)
+{
+       *min_len += sizeof(struct inet_diag_markcond);
+       return len >= *min_len;
+}
+
+static int inet_diag_bc_audit(const struct nlattr *attr,
+                             const struct sk_buff *skb)
 {
-       const void *bc = bytecode;
-       int  len = bytecode_len;
+       bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
+       const void *bytecode, *bc;
+       int bytecode_len, len;
+
+       if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
+               return -EINVAL;
+
+       bytecode = bc = nla_data(attr);
+       len = bytecode_len = nla_len(attr);
 
        while (len > 0) {
                int min_len = sizeof(struct inet_diag_bc_op);
@@ -658,6 +744,10 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
                        if (!valid_hostcond(bc, len, &min_len))
                                return -EINVAL;
                        break;
+               case INET_DIAG_BC_DEV_COND:
+                       if (!valid_devcond(bc, len, &min_len))
+                               return -EINVAL;
+                       break;
                case INET_DIAG_BC_S_GE:
                case INET_DIAG_BC_S_LE:
                case INET_DIAG_BC_D_GE:
@@ -665,6 +755,12 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
                        if (!valid_port_comparison(bc, len, &min_len))
                                return -EINVAL;
                        break;
+               case INET_DIAG_BC_MARK_COND:
+                       if (!net_admin)
+                               return -EPERM;
+                       if (!valid_markcond(bc, len, &min_len))
+                               return -EINVAL;
+                       break;
                case INET_DIAG_BC_AUTO:
                case INET_DIAG_BC_JMP:
                case INET_DIAG_BC_NOP:
@@ -693,7 +789,8 @@ static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
                              struct netlink_callback *cb,
                              const struct inet_diag_req_v2 *r,
-                             const struct nlattr *bc)
+                             const struct nlattr *bc,
+                             bool net_admin)
 {
        if (!inet_diag_bc_sk(bc, sk))
                return 0;
@@ -701,7 +798,8 @@ static int inet_csk_diag_dump(struct sock *sk,
        return inet_csk_diag_fill(sk, skb, r,
                                  sk_user_ns(NETLINK_CB(cb->skb).sk),
                                  NETLINK_CB(cb->skb).portid,
-                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
+                                 net_admin);
 }
 
 static void twsk_build_assert(void)
@@ -737,6 +835,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int i, num, s_i, s_num;
        u32 idiag_states = r->idiag_states;
+       bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
 
        if (idiag_states & TCPF_SYN_RECV)
                idiag_states |= TCPF_NEW_SYN_RECV;
@@ -778,7 +877,8 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                                    cb->args[3] > 0)
                                        goto next_listen;
 
-                               if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                               if (inet_csk_diag_dump(sk, skb, cb, r,
+                                                      bc, net_admin) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -846,7 +946,7 @@ skip_listen_ht:
                                           sk_user_ns(NETLINK_CB(cb->skb).sk),
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                          cb->nlh);
+                                          cb->nlh, net_admin);
                        if (res < 0) {
                                spin_unlock_bh(lock);
                                goto done;
@@ -938,7 +1038,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
        req.idiag_states = rc->idiag_states;
        req.id = rc->id;
 
-       return inet_diag_get_exact(in_skb, nlh, &req);
+       return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
 }
 
 static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -953,13 +1053,13 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                if (nlmsg_attrlen(nlh, hdrlen)) {
                        struct nlattr *attr;
+                       int err;
 
                        attr = nlmsg_find_attr(nlh, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
-                       if (!attr ||
-                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
-                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
-                               return -EINVAL;
+                       err = inet_diag_bc_audit(attr, skb);
+                       if (err)
+                               return err;
                }
                {
                        struct netlink_dump_control c = {
@@ -972,7 +1072,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
        return inet_diag_get_exact_compat(skb, nlh);
 }
 
-static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
 {
        int hdrlen = sizeof(struct inet_diag_req_v2);
        struct net *net = sock_net(skb->sk);
@@ -980,16 +1080,17 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
        if (nlmsg_len(h) < hdrlen)
                return -EINVAL;
 
-       if (h->nlmsg_flags & NLM_F_DUMP) {
+       if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
+           h->nlmsg_flags & NLM_F_DUMP) {
                if (nlmsg_attrlen(h, hdrlen)) {
                        struct nlattr *attr;
+                       int err;
 
                        attr = nlmsg_find_attr(h, hdrlen,
                                               INET_DIAG_REQ_BYTECODE);
-                       if (!attr ||
-                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
-                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
-                               return -EINVAL;
+                       err = inet_diag_bc_audit(attr, skb);
+                       if (err)
+                               return err;
                }
                {
                        struct netlink_dump_control c = {
@@ -999,7 +1100,7 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
                }
        }
 
-       return inet_diag_get_exact(skb, h, nlmsg_data(h));
+       return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
 }
 
 static
@@ -1050,14 +1151,16 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
 
 static const struct sock_diag_handler inet_diag_handler = {
        .family = AF_INET,
-       .dump = inet_diag_handler_dump,
+       .dump = inet_diag_handler_cmd,
        .get_info = inet_diag_handler_get_info,
+       .destroy = inet_diag_handler_cmd,
 };
 
 static const struct sock_diag_handler inet6_diag_handler = {
        .family = AF_INET6,
-       .dump = inet_diag_handler_dump,
+       .dump = inet_diag_handler_cmd,
        .get_info = inet_diag_handler_get_info,
+       .destroy = inet_diag_handler_cmd,
 };
 
 int inet_diag_register(const struct inet_diag_handler *h)
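The new INET_DIAG_BC_MARK_COND bytecode op lets a dump request filter sockets by fwmark, and inet_diag_bc_audit() now receives the request skb so it can refuse that op with -EPERM unless the requester has CAP_NET_ADMIN. Below is a minimal userspace sketch of how such a filter could be encoded, assuming uapi headers that carry the backported INET_DIAG_BC_MARK_COND and struct inet_diag_markcond definitions; the yes/no offsets follow the usual inet_diag bytecode convention (running off the end accepts, jumping four bytes past it rejects).

/* Illustrative userspace sketch, not part of the patch: encode a
 * "(sk_mark & mask) == mark" condition for INET_DIAG_REQ_BYTECODE.
 * Assumes the patched <linux/inet_diag.h> uapi header; buf must hold
 * at least sizeof(op) + sizeof(cond) = 12 bytes.
 */
#include <linux/inet_diag.h>

static int build_mark_filter(void *buf, __u32 mark, __u32 mask)
{
	struct inet_diag_bc_op *op = buf;
	struct inet_diag_markcond *cond = (struct inet_diag_markcond *)(op + 1);
	int len = sizeof(*op) + sizeof(*cond);

	op->code = INET_DIAG_BC_MARK_COND;
	op->yes  = len;		/* match: run off the end -> accept socket */
	op->no   = len + 4;	/* no match: jump past the end -> reject   */
	cond->mark = mark;
	cond->mask = mask;
	return len;		/* attach as INET_DIAG_REQ_BYTECODE */
}

The resulting attribute rides on an ordinary SOCK_DIAG_BY_FAMILY dump request; unprivileged callers that include it are now rejected by the audit step up front instead of receiving a misleading dump.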
index f2ad5216c438006e5f62a862913895bd04f2126a..dbf7f7ee2958a0b2bb01d83eddab0cf919012074 100644 (file)
@@ -1577,7 +1577,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
                           daddr, saddr,
-                          tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+                          tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+                          arg->uid);
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
index aa67e0e64b69dad791056706e986b4d0f15bbb88..0d5278ca47773e467dcc9fcc87ee60e017295b9c 100644 (file)
@@ -791,7 +791,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
                           RT_SCOPE_UNIVERSE, sk->sk_protocol,
-                          inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
+                          inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
+                          sock_i_uid(sk));
 
        security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
index 7113bae4e6a0c02726e0e11c33415b6779b7d04b..a9b479a1c4a0ef737ce599dd4323431e3619ab81 100644 (file)
@@ -601,7 +601,8 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
                            (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
-                          daddr, saddr, 0, 0);
+                          daddr, saddr, 0, 0,
+                          sock_i_uid(sk));
 
        if (!saddr && ipc.oif) {
                err = l3mdev_get_saddr(net, ipc.oif, &fl4);
index 7ceb8a574a50a0bfe38eb4796fa48502df5754e3..f75b5658a3a0534d26d882db1fd86fff26d62070 100644 (file)
@@ -501,7 +501,7 @@ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
@@ -517,11 +517,12 @@ static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
-                          iph->daddr, iph->saddr, 0, 0);
+                          iph->daddr, iph->saddr, 0, 0,
+                          sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
 }
 
 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
-                              const struct sock *sk)
+                              struct sock *sk)
 {
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
@@ -532,7 +533,7 @@ static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
        __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
 }
 
-static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
@@ -546,11 +547,12 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
-                          daddr, inet->inet_saddr, 0, 0);
+                          daddr, inet->inet_saddr, 0, 0,
+                          sock_i_uid(sk));
        rcu_read_unlock();
 }
 
-static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
                                 const struct sk_buff *skb)
 {
        if (skb)
@@ -2486,6 +2488,11 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
            nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
                goto nla_put_failure;
 
+       if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+           nla_put_u32(skb, RTA_UID,
+                       from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+               goto nla_put_failure;
+
        error = rt->dst.error;
 
        if (rt_is_input_route(rt)) {
@@ -2538,6 +2545,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        int mark;
        struct sk_buff *skb;
        u32 table_id = RT_TABLE_MAIN;
+       kuid_t uid;
 
        err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
        if (err < 0)
@@ -2565,6 +2573,10 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
        iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
        mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+       if (tb[RTA_UID])
+               uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+       else
+               uid = (iif ? INVALID_UID : current_uid());
 
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst;
@@ -2572,6 +2584,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        fl4.flowi4_tos = rtm->rtm_tos;
        fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
        fl4.flowi4_mark = mark;
+       fl4.flowi4_uid = uid;
 
        if (netif_index_is_l3_master(net, fl4.flowi4_oif))
                fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
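Because IPv4 output route lookups are now keyed on flowi4_uid as well, inet_rtm_getroute() accepts an RTA_UID attribute: a caller can ask which route a particular uid's traffic would take, and when the attribute is absent an output lookup falls back to the caller's own uid. A small sketch of appending that attribute to an RTM_GETROUTE request, assuming the patched uapi <linux/rtnetlink.h> exports RTA_UID; the helper is ordinary iproute2-style attribute packing.

/* Illustrative userspace sketch, not part of the patch: append a u32
 * rtattr (e.g. RTA_UID) to a netlink request being built in place.
 */
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 value)
{
	int len = RTA_LENGTH(sizeof(value));
	struct rtattr *rta;

	if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen)
		return -1;
	rta = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = len;
	memcpy(RTA_DATA(rta), &value, sizeof(value));
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len);
	return 0;
}

/* e.g.: addattr32(&req.nlh, sizeof(req), RTA_UID, target_uid); */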
index 4cbe9f0a428179d8c35fa5f0a05dd2b445498c11..31b6a4c9db32782edf8687501760f9a28efd9158 100644 (file)
@@ -374,8 +374,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
-                          opt->srr ? opt->faddr : ireq->ir_rmt_addr,
-                          ireq->ir_loc_addr, th->source, th->dest);
+                          (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, th->source, th->dest,
+                          sock_i_uid(sk));
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
index 70fb352e317fc138ff48067c6c6aa747e3bd3f43..46123369144ffa5e7b4cbfea0355efa867c1d996 100644 (file)
@@ -152,6 +152,21 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write,
+                                     void __user *buffer,
+                                     size_t *lenp, loff_t *ppos)
+{
+       int old_value = *(int *)ctl->data;
+       int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
+
+       if (write && ret == 0 && (new_value < 3 || new_value > 100))
+               *(int *)ctl->data = old_value;
+
+       return ret;
+}
+
 static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
                                       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -760,6 +775,13 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_ms_jiffies,
        },
+       {
+               .procname       = "tcp_default_init_rwnd",
+               .data           = &sysctl_tcp_default_init_rwnd,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_tcp_default_init_rwnd,
+       },
        {
                .procname       = "icmp_msgs_per_sec",
                .data           = &sysctl_icmp_msgs_per_sec,
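Note that proc_tcp_default_init_rwnd() still runs proc_dointvec(), so an out-of-range write (below 3 or above 100) does not return an error; the handler simply restores the previous value. A caller therefore has to read the file back to learn whether the write stuck. A small sketch of that round trip, assuming the patched kernel exposes /proc/sys/net/ipv4/tcp_default_init_rwnd:

/* Illustrative userspace sketch, not part of the patch: write the
 * sysctl and read it back; out-of-range values are silently reverted.
 */
#include <stdio.h>

static int set_default_init_rwnd(int val)
{
	const char *path = "/proc/sys/net/ipv4/tcp_default_init_rwnd";
	FILE *f = fopen(path, "w");
	int now = -1;

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	fclose(f);

	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &now) != 1)
		now = -1;
	fclose(f);
	return now;	/* compare against val to detect a rejected write */
}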
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644 (file)
index 0000000..0cbbf10
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+                           struct kobj_attribute *attr, char *buf) \
+{ \
+       return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+                            struct kobj_attribute *attr, \
+                            const char *buf, size_t count) \
+{ \
+       int val, ret; \
+       ret = sscanf(buf, "%d", &val); \
+       if (ret != 1) \
+               return -EINVAL; \
+       if (val < 0) \
+               return -EINVAL; \
+       _var = val; \
+       return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+       __ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+       &tcp_wmem_min_attr.attr,
+       &tcp_wmem_def_attr.attr,
+       &tcp_wmem_max_attr.attr,
+       &tcp_rmem_min_attr.attr,
+       &tcp_rmem_def_attr.attr,
+       &tcp_rmem_max_attr.attr,
+       NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+       .attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+       struct kobject *ipv4_kobject;
+       int ret;
+
+       ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+       if (!ipv4_kobject)
+               return -ENOMEM;
+
+       ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+       if (ret) {
+               kobject_put(ipv4_kobject);
+               return ret;
+       }
+
+       return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
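The point of routing these knobs through sysfs rather than sysctl is permission control: each tunable becomes an ordinary file under /sys/kernel/ipv4/ (tcp_wmem_min, tcp_wmem_def, tcp_wmem_max and the rmem equivalents) whose owner and mode can be set independently at boot. A short sketch of reading one of them, assuming that path layout:

/* Illustrative userspace sketch, not part of the patch: read one of
 * the /sys/kernel/ipv4/ tunables created above.
 */
#include <stdio.h>

static long read_ipv4_knob(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/ipv4/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;	/* e.g. read_ipv4_knob("tcp_rmem_max") */
}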
index 69daa81736f60b9639358ee6151874a6e0bfbf7e..dc173e0d218461ecd7f9bd6902904b641118ca50 100644 (file)
@@ -3084,6 +3084,52 @@ void tcp_done(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_done);
 
+int tcp_abort(struct sock *sk, int err)
+{
+       if (!sk_fullsock(sk)) {
+               if (sk->sk_state == TCP_NEW_SYN_RECV) {
+                       struct request_sock *req = inet_reqsk(sk);
+
+                       local_bh_disable();
+                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+                                                         req);
+                       local_bh_enable();
+                       return 0;
+               }
+               sock_gen_put(sk);
+               return -EOPNOTSUPP;
+       }
+
+       /* Don't race with userspace socket closes such as tcp_close. */
+       lock_sock(sk);
+
+       if (sk->sk_state == TCP_LISTEN) {
+               tcp_set_state(sk, TCP_CLOSE);
+               inet_csk_listen_stop(sk);
+       }
+
+       /* Don't race with BH socket closes such as inet_csk_listen_stop. */
+       local_bh_disable();
+       bh_lock_sock(sk);
+
+       if (!sock_flag(sk, SOCK_DEAD)) {
+               sk->sk_err = err;
+               /* This barrier is coupled with smp_rmb() in tcp_poll() */
+               smp_wmb();
+               sk->sk_error_report(sk);
+               if (tcp_need_reset(sk->sk_state))
+                       tcp_send_active_reset(sk, GFP_ATOMIC);
+               tcp_done(sk);
+       }
+
+       bh_unlock_sock(sk);
+       local_bh_enable();
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tcp_abort);
+
 extern struct tcp_congestion_ops tcp_reno;
 
 static __initdata unsigned long thash_entries;
index b31604086edd6b8aec2eaaf67c755b63caf75743..4d610934fb391c111d822a4d0544334a7b4b858f 100644 (file)
@@ -10,6 +10,8 @@
  */
 
 #include <linux/module.h>
+#include <linux/net.h>
+#include <linux/sock_diag.h>
 #include <linux/inet_diag.h>
 
 #include <linux/tcp.h>
@@ -46,12 +48,29 @@ static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
        return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
 }
 
+#ifdef CONFIG_INET_DIAG_DESTROY
+static int tcp_diag_destroy(struct sk_buff *in_skb,
+                           const struct inet_diag_req_v2 *req)
+{
+       struct net *net = sock_net(in_skb->sk);
+       struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
+
+       if (IS_ERR(sk))
+               return PTR_ERR(sk);
+
+       return sock_diag_destroy(sk, ECONNABORTED);
+}
+#endif
+
 static const struct inet_diag_handler tcp_diag_handler = {
        .dump            = tcp_diag_dump,
        .dump_one        = tcp_diag_dump_one,
        .idiag_get_info  = tcp_diag_get_info,
        .idiag_type      = IPPROTO_TCP,
        .idiag_info_size = sizeof(struct tcp_info),
+#ifdef CONFIG_INET_DIAG_DESTROY
+       .destroy         = tcp_diag_destroy,
+#endif
 };
 
 static int __init tcp_diag_init(void)
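With .destroy wired up here, a CAP_NET_ADMIN process can tear down an arbitrary TCP connection by its 4-tuple: the handler resolves the socket with inet_diag_find_one_icsk() and then calls sock_diag_destroy(), which lands in tcp_abort(sk, ECONNABORTED). A minimal sketch of issuing such a request over NETLINK_SOCK_DIAG, assuming the patched uapi headers export the backport's SOCK_DESTROY_BACKPORT message type (plain SOCK_DESTROY upstream) and a kernel built with CONFIG_INET_DIAG_DESTROY:

/* Illustrative userspace sketch, not part of the patch: abort an
 * established IPv4 TCP connection identified by its 4-tuple.
 * src/sport are the local end, dst/dport the remote end of the socket
 * being destroyed; the ACK/error reply is left unread here.
 */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

static int destroy_tcp_v4(const char *src, unsigned short sport,
			  const char *dst, unsigned short dport)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg;
	int fd, ret;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DESTROY_BACKPORT;	/* SOCK_DESTROY upstream */
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	msg.req.sdiag_family = AF_INET;
	msg.req.sdiag_protocol = IPPROTO_TCP;
	msg.req.idiag_states = ~0U;
	msg.req.id.idiag_sport = htons(sport);
	msg.req.id.idiag_dport = htons(dport);
	inet_pton(AF_INET, src, &msg.req.id.idiag_src[0]);
	inet_pton(AF_INET, dst, &msg.req.id.idiag_dst[0]);
	msg.req.id.idiag_cookie[0] = INET_DIAG_NOCOOKIE;
	msg.req.id.idiag_cookie[1] = INET_DIAG_NOCOOKIE;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -1;
	ret = send(fd, &msg, sizeof(msg), 0);
	close(fd);
	return ret < 0 ? -1 : 0;
}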
index 7cc0f8aac28f5de0cab88116071a450bc0ad0d41..35e97ff3054a8de4b4988c5ec70ea0f46fbea20d 100644 (file)
@@ -102,6 +102,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
 int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2;
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
 #define FLAG_WIN_UPDATE                0x02 /* Incoming ACK was a window update.       */
index b58a38eea0598bfa9cd874f980869c6868b01b2a..b6a48d4a3794ae62f4bad5ac91c06c88c54a6f07 100644 (file)
@@ -2372,6 +2372,7 @@ struct proto tcp_prot = {
        .destroy_cgroup         = tcp_destroy_cgroup,
        .proto_cgroup           = tcp_proto_cgroup,
 #endif
+       .diag_destroy           = tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
 
index 0795647e94c601d47245fd3ea610de8841b92af7..ca3731721d81dac1b73ef94aee51ac2b4e3834e4 100644 (file)
@@ -191,7 +191,7 @@ u32 tcp_default_init_rwnd(u32 mss)
         * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
         * limit when mss is larger than 1460.
         */
-       u32 init_rwnd = TCP_INIT_CWND * 2;
+       u32 init_rwnd = sysctl_tcp_default_init_rwnd;
 
        if (mss > 1460)
                init_rwnd = max((1460 * init_rwnd) / mss, 2U);
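The hard-coded doubling of TCP_INIT_CWND becomes the default of the new sysctl, and the MSS scaling above still shrinks the configured segment count on jumbo-frame paths so the initial window stays roughly constant in bytes. A standalone restatement of that computation (not kernel code) with a worked value: at the default of 20 segments, an MSS of 9000 gives max(1460 * 20 / 9000, 2) = 3 segments.

/* Illustrative restatement, not part of the patch: the scaling applied
 * to sysctl_tcp_default_init_rwnd for large MSS values.
 */
static unsigned int scaled_init_rwnd(unsigned int configured, unsigned int mss)
{
	unsigned int init_rwnd = configured;

	if (mss > 1460)
		init_rwnd = (1460 * init_rwnd) / mss;
	if (init_rwnd < 2)
		init_rwnd = 2;
	return init_rwnd;
}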
index e9513e397c4f30db956d1c92af1ec0d77c1530a6..381a035fcfa1ca57e28ddbd33a94f5ab6344112d 100644 (file)
@@ -1025,7 +1025,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
                                   flow_flags,
-                                  faddr, saddr, dport, inet->inet_sport);
+                                  faddr, saddr, dport, inet->inet_sport,
+                                  sock_i_uid(sk));
 
                if (!saddr && ipc.oif) {
                        err = l3mdev_get_saddr(net, ipc.oif, fl4);
@@ -2264,6 +2265,20 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 }
 EXPORT_SYMBOL(udp_poll);
 
+int udp_abort(struct sock *sk, int err)
+{
+       lock_sock(sk);
+
+       sk->sk_err = err;
+       sk->sk_error_report(sk);
+       udp_disconnect(sk, 0);
+
+       release_sock(sk);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(udp_abort);
+
 struct proto udp_prot = {
        .name              = "UDP",
        .owner             = THIS_MODULE,
@@ -2295,6 +2310,7 @@ struct proto udp_prot = {
        .compat_getsockopt = compat_udp_getsockopt,
 #endif
        .clear_sk          = sk_prot_clear_portaddr_nulls,
+       .diag_destroy      = udp_abort,
 };
 EXPORT_SYMBOL(udp_prot);
 
index 6116604bf6e8fd64d82b5d9496197cb7e4accef7..092aa60e8b92898a8d3612c447123a64604ac7aa 100644 (file)
@@ -20,7 +20,7 @@
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
                        struct netlink_callback *cb,
                        const struct inet_diag_req_v2 *req,
-                       struct nlattr *bc)
+                       struct nlattr *bc, bool net_admin)
 {
        if (!inet_diag_bc_sk(bc, sk))
                return 0;
@@ -28,7 +28,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
        return inet_sk_diag_fill(sk, NULL, skb, req,
                        sk_user_ns(NETLINK_CB(cb->skb).sk),
                        NETLINK_CB(cb->skb).portid,
-                       cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+                       cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
 }
 
 static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
@@ -75,7 +75,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
        err = inet_sk_diag_fill(sk, NULL, rep, req,
                           sk_user_ns(NETLINK_CB(in_skb).sk),
                           NETLINK_CB(in_skb).portid,
-                          nlh->nlmsg_seq, 0, nlh);
+                          nlh->nlmsg_seq, 0, nlh,
+                          netlink_net_capable(in_skb, CAP_NET_ADMIN));
        if (err < 0) {
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(rep);
@@ -98,6 +99,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
 {
        int num, s_num, slot, s_slot;
        struct net *net = sock_net(skb->sk);
+       bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
 
        s_slot = cb->args[0];
        num = s_num = cb->args[1];
@@ -132,7 +134,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
                            r->id.idiag_dport)
                                goto next;
 
-                       if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                       if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
                                spin_unlock_bh(&hslot->lock);
                                goto done;
                        }
@@ -165,12 +167,88 @@ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
        r->idiag_wqueue = sk_wmem_alloc_get(sk);
 }
 
+#ifdef CONFIG_INET_DIAG_DESTROY
+static int __udp_diag_destroy(struct sk_buff *in_skb,
+                             const struct inet_diag_req_v2 *req,
+                             struct udp_table *tbl)
+{
+       struct net *net = sock_net(in_skb->sk);
+       struct sock *sk;
+       int err;
+
+       rcu_read_lock();
+
+       if (req->sdiag_family == AF_INET)
+               sk = __udp4_lib_lookup(net,
+                               req->id.idiag_dst[0], req->id.idiag_dport,
+                               req->id.idiag_src[0], req->id.idiag_sport,
+                               req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6) {
+               if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+                   ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+                       sk = __udp4_lib_lookup(net,
+                                       req->id.idiag_dst[3], req->id.idiag_dport,
+                                       req->id.idiag_src[3], req->id.idiag_sport,
+                                       req->id.idiag_if, tbl);
+
+               else
+                       sk = __udp6_lib_lookup(net,
+                                       (struct in6_addr *)req->id.idiag_dst,
+                                       req->id.idiag_dport,
+                                       (struct in6_addr *)req->id.idiag_src,
+                                       req->id.idiag_sport,
+                                       req->id.idiag_if, tbl);
+       }
+#endif
+       else {
+               rcu_read_unlock();
+               return -EINVAL;
+       }
+
+       if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+               sk = NULL;
+
+       rcu_read_unlock();
+
+       if (!sk)
+               return -ENOENT;
+
+       if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
+               sock_put(sk);
+               return -ENOENT;
+       }
+
+       err = sock_diag_destroy(sk, ECONNABORTED);
+
+       sock_put(sk);
+
+       return err;
+}
+
+static int udp_diag_destroy(struct sk_buff *in_skb,
+                           const struct inet_diag_req_v2 *req)
+{
+       return __udp_diag_destroy(in_skb, req, &udp_table);
+}
+
+static int udplite_diag_destroy(struct sk_buff *in_skb,
+                               const struct inet_diag_req_v2 *req)
+{
+       return __udp_diag_destroy(in_skb, req, &udplite_table);
+}
+
+#endif
+
 static const struct inet_diag_handler udp_diag_handler = {
        .dump            = udp_diag_dump,
        .dump_one        = udp_diag_dump_one,
        .idiag_get_info  = udp_diag_get_info,
        .idiag_type      = IPPROTO_UDP,
        .idiag_info_size = 0,
+#ifdef CONFIG_INET_DIAG_DESTROY
+       .destroy         = udp_diag_destroy,
+#endif
 };
 
 static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
@@ -192,6 +270,9 @@ static const struct inet_diag_handler udplite_diag_handler = {
        .idiag_get_info  = udp_diag_get_info,
        .idiag_type      = IPPROTO_UDPLITE,
        .idiag_info_size = 0,
+#ifdef CONFIG_INET_DIAG_DESTROY
+       .destroy         = udplite_diag_destroy,
+#endif
 };
 
 static int __init udp_diag_init(void)
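As in the TCP path earlier in this series, the lookups above special-case AF_INET6 requests whose addresses are IPv4-mapped (::ffff:a.b.c.d): such sockets live in the IPv4 hash tables, so the kernel reads the IPv4 addresses out of word 3 of idiag_src/idiag_dst. A small sketch of the matching userspace-side check, using the standard libc IN6_IS_ADDR_V4MAPPED macro on the raw request fields:

/* Illustrative userspace sketch, not part of the patch: detect the
 * case the kernel handles via idiag_dst[3]/idiag_src[3].
 */
#include <netinet/in.h>
#include <linux/inet_diag.h>

static int is_v4mapped_request(const struct inet_diag_sockid *id)
{
	const struct in6_addr *dst = (const struct in6_addr *)id->idiag_dst;
	const struct in6_addr *src = (const struct in6_addr *)id->idiag_src;

	/* When both ends are ::ffff:a.b.c.d, the IPv4 addresses already
	 * sit in the last 32-bit word of each 128-bit field. */
	return IN6_IS_ADDR_V4MAPPED(dst) && IN6_IS_ADDR_V4MAPPED(src);
}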
index cb8bb5988c03fc7849eb5fbfda8b8bb9d55a7d34..1e541578a66d3eb63d864884156903909c4990e0 100644 (file)
@@ -205,6 +205,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+       .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@ -249,6 +250,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+       .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@ -2146,6 +2148,31 @@ static void  __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmp
                __ipv6_regen_rndid(idev);
 }
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table) {
+       /* Determines into what table to put autoconf PIO/RIO/default routes
+        * learned on this device.
+        *
+        * - If 0, use the same table for every device. This puts routes into
+        *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+        *   (but note that these three are currently all equal to
+        *   RT6_TABLE_MAIN).
+        * - If > 0, use the specified table.
+        * - If < 0, put routes into table dev->ifindex + (-rt_table).
+        */
+       struct inet6_dev *idev = in6_dev_get(dev);
+       u32 table;
+       int sysctl = idev->cnf.accept_ra_rt_table;
+       if (sysctl == 0) {
+               table = default_table;
+       } else if (sysctl > 0) {
+               table = (u32) sysctl;
+       } else {
+               table = (u32) dev->ifindex + (-sysctl);
+       }
+       in6_dev_put(idev);
+       return table;
+}
+
 /*
  *     Add prefix route.
  */
@@ -2155,7 +2182,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                      unsigned long expires, u32 flags)
 {
        struct fib6_config cfg = {
-               .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+               .fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@ -2188,7 +2215,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
-       u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+       u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
 
        table = fib6_get_table(dev_net(dev), tb_id);
        if (!table)
@@ -4665,6 +4692,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
+       array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -5631,6 +5659,13 @@ static struct addrconf_sysctl_table
                },
 #endif
 #endif
+               {
+                       .procname       = "accept_ra_rt_table",
+                       .data           = &ipv6_devconf.accept_ra_rt_table,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        .procname       = "proxy_ndp",
                        .data           = &ipv6_devconf.proxy_ndp,
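addrconf_rt_table() gives the new accept_ra_rt_table sysctl three modes: 0 keeps the existing behaviour (RT6_TABLE_PREFIX/INFO/DFLT, all equal to the main table today), a positive value sends every RA-learned route to that fixed table, and a negative value selects a per-interface table at ifindex + (-sysctl), which is how Android keeps each interface's autoconf routes separate. The same selection restated as a standalone helper, with an illustrative value: sysctl -1000 on ifindex 3 picks table 1003.

/* Illustrative restatement, not part of the patch: table selection for
 * RA-learned routes as a function of accept_ra_rt_table.
 */
static unsigned int ra_route_table(int sysctl, int ifindex,
				   unsigned int default_table)
{
	if (sysctl == 0)
		return default_table;
	if (sysctl > 0)
		return (unsigned int)sysctl;
	return (unsigned int)ifindex + (unsigned int)(-sysctl);
}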
index 9f5137cd604e51316c6c3a85ca5423b30ed7ce30..d9b25bd17bf14ed2225ae379c10f6f7a3ddf79e1 100644 (file)
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+       return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+       return 1;
+}
+#endif
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -112,6 +126,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
        if (protocol < 0 || protocol >= IPPROTO_MAX)
                return -EINVAL;
 
+       if (!current_has_network())
+               return -EACCES;
+
        /* Look for the requested type/protocol pair. */
 lookup_protocol:
        err = -ESOCKTNOSUPPORT;
@@ -158,8 +175,7 @@ lookup_protocol:
        }
 
        err = -EPERM;
-       if (sock->type == SOCK_RAW && !kern &&
-           !ns_capable(net->user_ns, CAP_NET_RAW))
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
                goto out_rcu_unlock;
 
        sock->ops = answer->ops;
@@ -662,6 +678,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
                fl6.flowi6_mark = sk->sk_mark;
                fl6.fl6_dport = inet->inet_dport;
                fl6.fl6_sport = inet->inet_sport;
+               fl6.flowi6_uid = sock_i_uid(sk);
                security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
                rcu_read_lock();
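Under CONFIG_ANDROID_PARANOID_NETWORK, inet6_create() now refuses AF_INET6 socket creation outright unless the caller is in the AID_INET group or holds CAP_NET_RAW, and the SOCK_RAW check drops the per-namespace ns_capable() in favour of the init-namespace capable(). What an unprivileged process outside AID_INET would observe, sketched for illustration (only meaningful on a kernel built with that option):

/* Illustrative userspace sketch, not part of the patch: socket()
 * returns EACCES for callers blocked by the paranoid-network check.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0 && errno == EACCES)
		printf("blocked by ANDROID_PARANOID_NETWORK: %s\n",
		       strerror(errno));
	if (fd >= 0)
		close(fd);
	return 0;
}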
index 0630a4d5daaabaf9a294c18c398ab569ed74661c..c52b8fc904c978344f9aaafd7d6de3b43108cf2f 100644 (file)
@@ -664,7 +664,7 @@ static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 
        return 0;
index 428162155280ca2af782ea9fd9fa26e0d1666d89..183ff87dacf33f2f95f80fc043c939d217d36b68 100644 (file)
@@ -161,6 +161,7 @@ ipv4_connected:
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet->inet_dport;
        fl6.fl6_sport = inet->inet_sport;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
index 060a60b2f8a6db074167e389b56893337c887fe9..f921368c32c99b79aec721eb85100b1cc97a4799 100644 (file)
@@ -476,7 +476,7 @@ static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 
        return 0;
index 9508a20fbf61432f561202edbe40b59e63c3489e..840a4388f860dc713431be32f3193332533024f2 100644 (file)
@@ -166,15 +166,15 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv);
  * to explore inner IPv6 header, eg. ICMPv6 error messages.
  *
  * If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
  *
  * If the first fragment doesn't contain the final protocol header or
  * NEXTHDR_NONE it is considered invalid.
  *
  * Note that non-1st fragment is special case that "the protocol number
  * of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
  *
  * if flags is not NULL and it's a fragment, then the frag flag
  * IP6_FH_F_FRAG will be set. If it's an AH header, the
@@ -253,9 +253,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                                if (target < 0 &&
                                    ((!ipv6_ext_hdr(hp->nexthdr)) ||
                                     hp->nexthdr == NEXTHDR_NONE)) {
-                                       if (fragoff)
+                                       if (fragoff) {
                                                *fragoff = _frag_off;
-                                       return hp->nexthdr;
+                                               return hp->nexthdr;
+                                       } else {
+                                               return -EINVAL;
+                                       }
                                }
                                if (!found)
                                        return -ENOENT;
index 0a37ddc7af51579f56b644ba0e4c3c3a7a2e2bc7..41e5c9520c7d47d8753f3ab374511e7844192c4a 100644 (file)
@@ -92,13 +92,13 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct net *net = dev_net(skb->dev);
 
        if (type == ICMPV6_PKT_TOOBIG)
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        else if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
 
        if (!(type & ICMPV6_INFOMSG_MASK))
                if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-                       ping_err(skb, offset, info);
+                       ping_err(skb, offset, ntohl(info));
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
index a7ca2cde2ecbcff85c9a6151b4770e3897d16314..897bb6eb575125f378b491f01d0fe2b3c4b24a33 100644 (file)
@@ -86,6 +86,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
        fl6->flowi6_mark = ireq->ir_mark;
        fl6->fl6_dport = ireq->ir_rmt_port;
        fl6->fl6_sport = htons(ireq->ir_num);
+       fl6->flowi6_uid = sock_i_uid((struct sock *)sk);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
        dst = ip6_dst_lookup_flow(sk, fl6, final_p);
@@ -134,6 +135,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
        fl6->flowi6_mark = sk->sk_mark;
        fl6->fl6_sport = inet->inet_sport;
        fl6->fl6_dport = inet->inet_dport;
+       fl6->flowi6_uid = sock_i_uid(sk);
        security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
        rcu_read_lock();
index 0a8610b33d7980e4b1d5290b4266669ddcdc9e0a..c76ebc7fc52d1169819d9ad78b9c954191d854e1 100644 (file)
@@ -599,7 +599,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 
        return 0;
index 1b9316e1386a96c899c67888fba4618d3004e69a..b247baceb797d004b0b911366fff75498c8a9379 100644 (file)
@@ -76,7 +76,7 @@ static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
-               ip6_update_pmtu(skb, net, info, 0, 0);
+               ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
        xfrm_state_put(x);
 
        return 0;
index 3e55447b63a43943b5552ed0c9a80bcb31741c4e..fa65e92e9510b3369bddf874f0d1a9a0681e36a2 100644 (file)
@@ -84,7 +84,7 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        struct icmp6hdr user_icmph;
        int addr_type;
        struct in6_addr *daddr;
-       int iif = 0;
+       int oif = 0;
        struct flowi6 fl6;
        int err;
        int hlimit;
@@ -106,25 +106,30 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                if (u->sin6_family != AF_INET6) {
                        return -EAFNOSUPPORT;
                }
-               if (sk->sk_bound_dev_if &&
-                   sk->sk_bound_dev_if != u->sin6_scope_id) {
-                       return -EINVAL;
-               }
                daddr = &(u->sin6_addr);
-               iif = u->sin6_scope_id;
+               if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+                       oif = u->sin6_scope_id;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = &sk->sk_v6_daddr;
        }
 
-       if (!iif)
-               iif = sk->sk_bound_dev_if;
+       if (!oif)
+               oif = sk->sk_bound_dev_if;
+
+       if (!oif)
+               oif = np->sticky_pktinfo.ipi6_ifindex;
+
+       if (!oif && ipv6_addr_is_multicast(daddr))
+               oif = np->mcast_oif;
+       else if (!oif)
+               oif = np->ucast_oif;
 
        addr_type = ipv6_addr_type(daddr);
-       if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
-               return -EINVAL;
-       if (addr_type & IPV6_ADDR_MAPPED)
+       if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+           (addr_type & IPV6_ADDR_MAPPED) ||
+           (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
                return -EINVAL;
 
        /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -134,16 +139,13 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        fl6.flowi6_proto = IPPROTO_ICMPV6;
        fl6.saddr = np->saddr;
        fl6.daddr = *daddr;
+       fl6.flowi6_oif = oif;
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
        fl6.fl6_icmp_type = user_icmph.icmp6_type;
        fl6.fl6_icmp_code = user_icmph.icmp6_code;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-               fl6.flowi6_oif = np->mcast_oif;
-       else if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->ucast_oif;
-
        dst = ip6_sk_dst_lookup_flow(sk, &fl6,  daddr);
        if (IS_ERR(dst))
                return PTR_ERR(dst);
@@ -155,11 +157,6 @@ int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                goto dst_err_out;
        }
 
-       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-               fl6.flowi6_oif = np->mcast_oif;
-       else if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->ucast_oif;
-
        pfh.icmph.type = user_icmph.icmp6_type;
        pfh.icmph.code = user_icmph.icmp6_code;
        pfh.icmph.checksum = 0;
index 99140986e88716529b90f082bf2b7011a2db8c15..d9ad71a01b4cac79b6fbbf536f3d1938bd398fea 100644 (file)
@@ -768,6 +768,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        memset(&fl6, 0, sizeof(fl6));
 
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (sin6) {
                if (addr_len < SIN6_LEN_RFC2133)
index dbffc9de184b1760752ccd62d915fb0273eed99b..46476a3af2ad68682c964060004ace0d7097b20b 100644 (file)
@@ -99,13 +99,12 @@ static void         rt6_dst_from_metrics_check(struct rt6_info *rt);
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex,
-                                          unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+                                          const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex);
+                                          const struct in6_addr *gwaddr);
 #endif
 
 struct uncached_list {
@@ -755,7 +754,6 @@ static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
 {
-       struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
@@ -800,8 +798,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
-               rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-                                       gwaddr, dev->ifindex);
+               rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
 
        if (rt && !lifetime) {
                ip6_del_rt(rt);
@@ -809,8 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
        }
 
        if (!rt && lifetime)
-               rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
-                                       pref);
+               rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1386,7 +1382,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-                    int oif, u32 mark)
+                    int oif, u32 mark, kuid_t uid)
 {
        const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
        struct dst_entry *dst;
@@ -1398,6 +1394,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
+       fl6.flowi6_uid = uid;
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
@@ -1409,7 +1406,7 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
        ip6_update_pmtu(skb, sock_net(sk), mtu,
-                       sk->sk_bound_dev_if, sk->sk_mark);
+                       sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
@@ -2248,15 +2245,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex)
+                                          const struct in6_addr *gwaddr)
 {
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
 
-       table = fib6_get_table(net, RT6_TABLE_INFO);
+       table = fib6_get_table(dev_net(dev),
+                              addrconf_rt_table(dev, RT6_TABLE_INFO));
        if (!table)
                return NULL;
 
@@ -2266,7 +2264,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                goto out;
 
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->dst.dev->ifindex != ifindex)
+               if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
                        continue;
@@ -2280,23 +2278,22 @@ out:
        return rt;
 }
 
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex,
-                                          unsigned int pref)
+                                          const struct in6_addr *gwaddr, unsigned int pref)
 {
        struct fib6_config cfg = {
                .fc_metric      = IP6_RT_PRIO_USER,
-               .fc_ifindex     = ifindex,
+               .fc_ifindex     = dev->ifindex,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
-               .fc_nlinfo.nl_net = net,
+               .fc_nlinfo.nl_net = dev_net(dev),
        };
 
-       cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+       cfg.fc_table = l3mdev_fib_table_by_index(dev_net(dev), dev->ifindex) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
        cfg.fc_dst = *prefix;
        cfg.fc_gateway = *gwaddr;
 
@@ -2306,7 +2303,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 
        ip6_route_add(&cfg);
 
-       return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+       return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
 }
 #endif
 
@@ -2315,7 +2312,8 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        struct rt6_info *rt;
        struct fib6_table *table;
 
-       table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+       table = fib6_get_table(dev_net(dev),
+                              addrconf_rt_table(dev, RT6_TABLE_MAIN));
        if (!table)
                return NULL;
 
@@ -2337,7 +2335,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                                     unsigned int pref)
 {
        struct fib6_config cfg = {
-               .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+               .fc_table       = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -2354,28 +2352,17 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
        return rt6_get_dflt_router(gwaddr, dev);
 }
 
-void rt6_purge_dflt_routers(struct net *net)
-{
-       struct rt6_info *rt;
-       struct fib6_table *table;
 
-       /* NOTE: Keep consistent with rt6_get_dflt_router */
-       table = fib6_get_table(net, RT6_TABLE_DFLT);
-       if (!table)
-               return;
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg) {
+       if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+           (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+               return -1;
+       return 0;
+}
 
-restart:
-       read_lock_bh(&table->tb6_lock);
-       for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-                   (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
-                       dst_hold(&rt->dst);
-                       read_unlock_bh(&table->tb6_lock);
-                       ip6_del_rt(rt);
-                       goto restart;
-               }
-       }
-       read_unlock_bh(&table->tb6_lock);
+void rt6_purge_dflt_routers(struct net *net)
+{
+       fib6_clean_all(net, rt6_addrconf_purge, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
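
The rewrite above replaces the open-coded restart loop with fib6_clean_all(): in this kernel the walker visits every route and deletes the ones whose callback returns a negative value, so rt6_addrconf_purge() only has to express the predicate. Below is a minimal, self-contained userspace sketch of that walk-and-delete pattern; the route type, flag values and function names are illustrative stand-ins (and the accept_ra check is omitted), not kernel API.

#include <stdlib.h>

struct route { int flags; struct route *next; };

#define F_DEFAULT  0x1   /* stand-in for RTF_DEFAULT  */
#define F_ADDRCONF 0x2   /* stand-in for RTF_ADDRCONF */

/* Mirror of rt6_addrconf_purge(): -1 asks for deletion, 0 keeps the entry. */
static int purge_cb(struct route *rt, void *arg)
{
        (void)arg;
        return (rt->flags & (F_DEFAULT | F_ADDRCONF)) ? -1 : 0;
}

/* Walk the list once and unlink flagged entries, like fib6_clean_all(). */
static void clean_all(struct route **head,
                      int (*fn)(struct route *, void *), void *arg)
{
        struct route **pp = head;

        while (*pp) {
                if (fn(*pp, arg) < 0) {
                        struct route *dead = *pp;
                        *pp = dead->next;
                        free(dead);
                } else {
                        pp = &(*pp)->next;
                }
        }
}
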
@@ -2700,6 +2687,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_PREF]              = { .type = NLA_U8 },
        [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
        [RTA_ENCAP]             = { .type = NLA_NESTED },
+       [RTA_UID]               = { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3263,6 +3251,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        if (tb[RTA_MARK])
                fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
 
+       if (tb[RTA_UID])
+               fl6.flowi6_uid = make_kuid(current_user_ns(),
+                                          nla_get_u32(tb[RTA_UID]));
+       else
+               fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
        if (iif) {
                struct net_device *dev;
                int flags = 0;
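
The getroute hunk above decides which UID a route lookup runs under: an explicit RTA_UID attribute is honored; otherwise a lookup with an input interface uses INVALID_UID, and a locally generated lookup uses the caller's UID. A compact sketch of that selection rule follows, with hypothetical names rather than the kernel types.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t uid_sk;
#define UID_NONE ((uid_sk)-1)   /* stand-in for INVALID_UID */

/* Priority: RTA_UID attribute > caller's UID (local lookup) > no UID (iif set). */
static uid_sk route_lookup_uid(bool have_rta_uid, uid_sk rta_uid,
                               int iif, uid_sk caller_uid)
{
        if (have_rta_uid)
                return rta_uid;
        return iif ? UID_NONE : caller_uid;
}
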
index eaf7ac496d506937cd52944bbdc17da720110b59..a22015fab95eb69b7e7e17a22c0aaacf9cefd92d 100644 (file)
@@ -228,6 +228,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                fl6.flowi6_mark = ireq->ir_mark;
                fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
+               fl6.flowi6_uid = sock_i_uid(sk);
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
                dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
index 5f581616bf6a068269587077879cf56b7e637cb5..50e71e784e850dc787d38f4454a596c1d2546c90 100644 (file)
@@ -234,6 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1904,6 +1905,7 @@ struct proto tcpv6_prot = {
        .proto_cgroup           = tcp_proto_cgroup,
 #endif
        .clear_sk               = tcp_v6_clear_sk,
+       .diag_destroy           = tcp_abort,
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index dfa85e7264dfd54d38ccddb0920e09e789ef7643..1207379c1cce3a214afef4c2a39230360aa4bdc7 100644 (file)
@@ -1244,6 +1244,7 @@ do_udp_sendmsg:
                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
        fl6.flowi6_mark = sk->sk_mark;
+       fl6.flowi6_uid = sock_i_uid(sk);
 
        if (msg->msg_controllen) {
                opt = &opt_space;
@@ -1552,6 +1553,7 @@ struct proto udpv6_prot = {
        .compat_getsockopt = compat_udpv6_getsockopt,
 #endif
        .clear_sk          = udp_v6_clear_sk,
+       .diag_destroy      = udp_abort,
 };
 
 static struct inet_protosw udpv6_protosw = {
index c6be0b4f405888489b21cee009b267d95f626c34..b6dc2d7cd6501ac01a04a17e47584ae0dc36ae80 100644 (file)
@@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
        const u8 *target_addr, *orig_addr;
        const u8 *da;
        u8 target_flags, ttl, flags;
-       u32 orig_sn, target_sn, lifetime, target_metric;
+       u32 orig_sn, target_sn, lifetime, target_metric = 0;
        bool reply = false;
        bool forward = true;
        bool root_is_gate;
index 4692782b528053a408b15f0daccaf6f6cb3fc10e..1959548b11618d7b857ebb81d0919276f3fa1e82 100644 (file)
@@ -1278,6 +1278,8 @@ config NETFILTER_XT_MATCH_OWNER
        based on who created the socket: the user or group. It is also
        possible to check whether a socket actually exists.
 
+       Conflicts with '"quota, tag, owner" match'
+
 config NETFILTER_XT_MATCH_POLICY
        tristate 'IPsec "policy" match support'
        depends on XFRM
@@ -1311,6 +1313,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+       bool '"quota, tag, owner" match and stats support'
+       depends on NETFILTER_XT_MATCH_SOCKET
+       depends on NETFILTER_XT_MATCH_OWNER=n
+       help
+         This option replaces the `owner' match. In addition to matching
+         on uid, it keeps stats based on a tag assigned to a socket.
+         The full tag is composed of a UID and an accounting tag.
+         The tags are assignable to sockets from user space (e.g. a download
+         manager can assign the socket to another UID for accounting).
+         Stats and control are done via /proc/net/xt_qtaguid/.
+         It replaces owner as it takes the same arguments, but should
+         really be recognized by the iptables tool.
+
+         If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
        tristate '"quota" match support'
        depends on NETFILTER_ADVANCED
@@ -1321,6 +1339,29 @@ config NETFILTER_XT_MATCH_QUOTA
          If you want to compile it as a module, say M here and read
          <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+       tristate '"quota2" match support'
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds a `quota2' match, which allows matching on a
+         byte counter that is counted correctly rather than per CPU.
+         It allows naming the quotas.
+         This is based on http://xtables-addons.git.sourceforge.net
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+       bool '"quota2" Netfilter LOG support'
+       depends on NETFILTER_XT_MATCH_QUOTA2
+       default n
+       help
+         This option allows `quota2' to log ONCE when a quota limit
+         is exceeded. It logs via NETLINK using the NETLINK_NFLOG family.
+         It logs similarly to ipt_ULOG, but carries no packet data.
+
+         If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
        tristate '"rateest" match support'
        depends on NETFILTER_ADVANCED
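
The quota2 help text above stresses that the byte counter is kept correctly "and not per CPU": every packet charges one shared, locked counter, so concurrent CPUs cannot each spend the full budget. A standalone sketch of that idea is shown below; the names and the mutex are illustrative (the module itself runs in kernel space and uses a spinlock), not the xt_quota2 implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct quota_sketch {
        pthread_mutex_t lock;   /* single lock protecting the shared budget */
        uint64_t remaining;     /* bytes left, shared by all CPUs/threads   */
};

/* Charge one packet against the shared quota; true while the budget holds. */
static bool quota_charge(struct quota_sketch *q, uint64_t pkt_bytes)
{
        bool ok = false;

        pthread_mutex_lock(&q->lock);
        if (q->remaining >= pkt_bytes) {
                q->remaining -= pkt_bytes;
                ok = true;
        }
        pthread_mutex_unlock(&q->lock);
        return ok;
}
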
index 7638c36b498ccd00618bd073252331147912d125..ad6a8aa63b1fe788a3836db519a4a19b147f2285 100644 (file)
@@ -156,7 +156,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CGROUP) += xt_cgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
index 77afe913d03db73c65631fba8c42732cb3f33f32..9adedba78eeaccafce9df7c9f851b7f8c19f7fe8 100644 (file)
@@ -326,10 +326,12 @@ replay:
                nlh = nlmsg_hdr(skb);
                err = 0;
 
-               if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
-                   skb->len < nlh->nlmsg_len) {
-                       err = -EINVAL;
-                       goto ack;
+               if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+                   skb->len < nlh->nlmsg_len ||
+                   nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
+                       nfnl_err_reset(&err_list);
+                       status |= NFNL_BATCH_FAILURE;
+                       goto done;
                }
 
                /* Only requests are handled by the kernel */
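
The nfnetlink hunk above tightens batch parsing: a message's claimed nlmsg_len must cover at least the netlink header and must not exceed the bytes actually left in the skb before the payload length is trusted, and a malformed header now fails the whole batch instead of being acked individually. A self-contained sketch of the length check itself follows; the header size is hard-coded to the usual 16 bytes of struct nlmsghdr, and the names are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NLHDR_SZ 16u   /* sizeof(struct nlmsghdr) on Linux */

/* Validate a claimed netlink message length against the data actually held. */
static bool nlmsg_sane(uint32_t claimed_len, size_t bytes_left, size_t min_payload)
{
        if (claimed_len < NLHDR_SZ)
                return false;                    /* shorter than the header itself   */
        if (bytes_left < claimed_len)
                return false;                    /* would run past the received data */
        return (claimed_len - NLHDR_SZ) >= min_payload;   /* room for nfgenmsg etc. */
}
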
index 29d2c31f406ca585d5f0eb1f08bcaf26d8364053..0975c993a94e598078647e607a4f477c8306b85c 100644 (file)
@@ -5,6 +5,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 
 struct idletimer_tg_attr {
        struct attribute attr;
@@ -55,14 +64,110 @@ struct idletimer_tg {
        struct kobject *kobj;
        struct idletimer_tg_attr attr;
 
+       struct timespec delayed_timer_trigger;
+       struct timespec last_modified_timer;
+       struct timespec last_suspend_time;
+       struct notifier_block pm_nb;
+
+       int timeout;
        unsigned int refcnt;
+       bool work_pending;
+       bool send_nl_msg;
+       bool active;
+       uid_t uid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+               struct timespec *ts)
+{
+       bool state;
+       struct timespec temp;
+       spin_lock_bh(&timestamp_lock);
+       timer->work_pending = false;
+       if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+                       timer->delayed_timer_trigger.tv_sec != 0) {
+               state = false;
+               temp.tv_sec = timer->timeout;
+               temp.tv_nsec = 0;
+               if (timer->delayed_timer_trigger.tv_sec != 0) {
+                       temp = timespec_add(timer->delayed_timer_trigger, temp);
+                       ts->tv_sec = temp.tv_sec;
+                       ts->tv_nsec = temp.tv_nsec;
+                       timer->delayed_timer_trigger.tv_sec = 0;
+                       timer->work_pending = true;
+                       schedule_work(&timer->work);
+               } else {
+                       temp = timespec_add(timer->last_modified_timer, temp);
+                       ts->tv_sec = temp.tv_sec;
+                       ts->tv_nsec = temp.tv_nsec;
+               }
+       } else {
+               state = timer->active;
+       }
+       spin_unlock_bh(&timestamp_lock);
+       return state;
+}
+
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+       char iface_msg[NLMSG_MAX_SIZE];
+       char state_msg[NLMSG_MAX_SIZE];
+       char timestamp_msg[NLMSG_MAX_SIZE];
+       char uid_msg[NLMSG_MAX_SIZE];
+       char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+       int res;
+       struct timespec ts;
+       uint64_t time_ns;
+       bool state;
+
+       res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+                      iface);
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+
+       get_monotonic_boottime(&ts);
+       state = check_for_delayed_trigger(timer, &ts);
+       res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+                       state ? "active" : "inactive");
+
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+
+       if (state) {
+               res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+               if (NLMSG_MAX_SIZE <= res)
+                       pr_err("message too long (%d)", res);
+       } else {
+               res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+               if (NLMSG_MAX_SIZE <= res)
+                       pr_err("message too long (%d)", res);
+       }
+
+       time_ns = timespec_to_ns(&ts);
+       res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+       if (NLMSG_MAX_SIZE <= res) {
+               timestamp_msg[0] = '\0';
+               pr_err("message too long (%d)", res);
+       }
+
+       pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+                timestamp_msg, uid_msg);
+       kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -83,6 +188,7 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 {
        struct idletimer_tg *timer;
        unsigned long expires = 0;
+       unsigned long now = jiffies;
 
        mutex_lock(&list_mutex);
 
@@ -92,11 +198,15 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 
        mutex_unlock(&list_mutex);
 
-       if (time_after(expires, jiffies))
+       if (time_after(expires, now))
                return sprintf(buf, "%u\n",
-                              jiffies_to_msecs(expires - jiffies) / 1000);
+                              jiffies_to_msecs(expires - now) / 1000);
 
-       return sprintf(buf, "0\n");
+       if (timer->send_nl_msg)
+               return sprintf(buf, "0 %d\n",
+                       jiffies_to_msecs(now - expires) / 1000);
+       else
+               return sprintf(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +215,9 @@ static void idletimer_tg_work(struct work_struct *work)
                                                  work);
 
        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+       if (timer->send_nl_msg)
+               notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(unsigned long data)
@@ -112,8 +225,55 @@ static void idletimer_tg_expired(unsigned long data)
        struct idletimer_tg *timer = (struct idletimer_tg *) data;
 
        pr_debug("timer %s expired\n", timer->attr.attr.name);
-
+       spin_lock_bh(&timestamp_lock);
+       timer->active = false;
+       timer->work_pending = true;
        schedule_work(&timer->work);
+       spin_unlock_bh(&timestamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+               unsigned long pm_event, void *unused)
+{
+       struct timespec ts;
+       unsigned long time_diff, now = jiffies;
+       struct idletimer_tg *timer = container_of(notifier,
+                       struct idletimer_tg, pm_nb);
+       if (!timer)
+               return NOTIFY_DONE;
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               get_monotonic_boottime(&timer->last_suspend_time);
+               break;
+       case PM_POST_SUSPEND:
+               spin_lock_bh(&timestamp_lock);
+               if (!timer->active) {
+                       spin_unlock_bh(&timestamp_lock);
+                       break;
+               }
+               /* jiffies are not updated while suspended, so 'now' still
+                * represents the time at which the system suspended */
+               if (time_after(timer->timer.expires, now)) {
+                       get_monotonic_boottime(&ts);
+                       ts = timespec_sub(ts, timer->last_suspend_time);
+                       time_diff = timespec_to_jiffies(&ts);
+                       if (timer->timer.expires > (time_diff + now)) {
+                               mod_timer_pending(&timer->timer,
+                                               (timer->timer.expires - time_diff));
+                       } else {
+                               del_timer(&timer->timer);
+                               timer->timer.expires = 0;
+                               timer->active = false;
+                               timer->work_pending = true;
+                               schedule_work(&timer->work);
+                       }
+               }
+               spin_unlock_bh(&timestamp_lock);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
 }
 
 static int idletimer_tg_create(struct idletimer_tg_info *info)
@@ -146,6 +306,21 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
        setup_timer(&info->timer->timer, idletimer_tg_expired,
                    (unsigned long) info->timer);
        info->timer->refcnt = 1;
+       info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+       info->timer->active = true;
+       info->timer->timeout = info->timeout;
+
+       info->timer->delayed_timer_trigger.tv_sec = 0;
+       info->timer->delayed_timer_trigger.tv_nsec = 0;
+       info->timer->work_pending = false;
+       info->timer->uid = 0;
+       get_monotonic_boottime(&info->timer->last_modified_timer);
+
+       info->timer->pm_nb.notifier_call = idletimer_resume;
+       ret = register_pm_notifier(&info->timer->pm_nb);
+       if (ret)
+               printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+                               __func__, ret);
 
        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -162,6 +337,42 @@ out:
        return ret;
 }
 
+static void reset_timer(const struct idletimer_tg_info *info,
+                       struct sk_buff *skb)
+{
+       unsigned long now = jiffies;
+       struct idletimer_tg *timer = info->timer;
+       bool timer_prev;
+
+       spin_lock_bh(&timestamp_lock);
+       timer_prev = timer->active;
+       timer->active = true;
+       /* timer_prev is used to guard against jiffies overflow in time_before() */
+       if (!timer_prev || time_before(timer->timer.expires, now)) {
+               pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+                               timer->timer.expires, now);
+
+               /* Stores the uid responsible for waking up the radio */
+               if (skb && (skb->sk)) {
+                       timer->uid = from_kuid_munged(current_user_ns(),
+                                               sock_i_uid(skb->sk));
+               }
+
+               /* checks if there is a pending inactive notification */
+               if (timer->work_pending)
+                       timer->delayed_timer_trigger = timer->last_modified_timer;
+               else {
+                       timer->work_pending = true;
+                       schedule_work(&timer->work);
+               }
+       }
+
+       get_monotonic_boottime(&timer->last_modified_timer);
+       mod_timer(&timer->timer,
+                       msecs_to_jiffies(info->timeout * 1000) + now);
+       spin_unlock_bh(&timestamp_lock);
+}
+
 /*
  * The actual xt_tables plugin.
  */
@@ -169,15 +380,23 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
                                         const struct xt_action_param *par)
 {
        const struct idletimer_tg_info *info = par->targinfo;
+       unsigned long now = jiffies;
 
        pr_debug("resetting timer %s, timeout period %u\n",
                 info->label, info->timeout);
 
        BUG_ON(!info->timer);
 
-       mod_timer(&info->timer->timer,
-                 msecs_to_jiffies(info->timeout * 1000) + jiffies);
+       info->timer->active = true;
 
+       if (time_before(info->timer->timer.expires, now)) {
+               schedule_work(&info->timer->work);
+               pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+                        info->label, info->timer->timer.expires, now);
+       }
+
+       /* TODO: Avoid modifying timers on each packet */
+       reset_timer(info, skb);
        return XT_CONTINUE;
 }
 
@@ -186,7 +405,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        struct idletimer_tg_info *info = par->targinfo;
        int ret;
 
-       pr_debug("checkentry targinfo%s\n", info->label);
+       pr_debug("checkentry targinfo %s\n", info->label);
 
        if (info->timeout == 0) {
                pr_debug("timeout value is zero\n");
@@ -205,9 +424,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        info->timer = __idletimer_tg_find_by_label(info->label);
        if (info->timer) {
                info->timer->refcnt++;
-               mod_timer(&info->timer->timer,
-                         msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+               reset_timer(info, NULL);
                pr_debug("increased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        } else {
@@ -220,6 +437,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        }
 
        mutex_unlock(&list_mutex);
+
        return 0;
 }
 
@@ -237,11 +455,12 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+               unregister_pm_notifier(&info->timer->pm_nb);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
        } else {
                pr_debug("decreased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        }
 
        mutex_unlock(&list_mutex);
@@ -249,6 +468,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 
 static struct xt_target idletimer_tg __read_mostly = {
        .name           = "IDLETIMER",
+       .revision       = 1,
        .family         = NFPROTO_UNSPEC,
        .target         = idletimer_tg_target,
        .targetsize     = sizeof(struct idletimer_tg_info),
@@ -314,3 +534,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644 (file)
index 0000000..3bf0c59
--- /dev/null
@@ -0,0 +1,3032 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+#include "../../fs/proc/internal.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+       ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+
+/* Everybody can write. But proc_ctrl_write_limited is true by default which
+ * limits what can be controlled. See the can_*() functions.
+ */
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+/* Limited by default, so the gid of the ctrl and stats proc entries
+ * will limit what can be done. See the can_*() functions.
+ */
+static bool proc_stats_readall_limited = true;
+static bool proc_ctrl_write_limited = true;
+
+module_param_named(stats_readall_limited, proc_stats_readall_limited, bool,
+                  S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_limited, proc_ctrl_write_limited, bool,
+                  S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ *  - the iface stats handling will not act on notifications.
+ *  - iptables matches will never match.
+ *  - ctrl commands silently succeed.
+ *  - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ *  - don't create proc specific structs to track tags
+ *  - don't check that active tag stats exceed some limits.
+ *  - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+                  S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets used to the new fields
+ * that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
+static bool can_manipulate_uids(void)
+{
+       /* root pwnd */
+       return in_egroup_p(xt_qtaguid_ctrl_file->gid)
+               || unlikely(!from_kuid(&init_user_ns, current_fsuid()))
+               || unlikely(!proc_ctrl_write_limited)
+               || unlikely(uid_eq(current_fsuid(), xt_qtaguid_ctrl_file->uid));
+}
+
+static bool can_impersonate_uid(kuid_t uid)
+{
+       return uid_eq(uid, current_fsuid()) || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(kuid_t uid)
+{
+       /* root pwnd */
+       return in_egroup_p(xt_qtaguid_stats_file->gid)
+               || unlikely(!from_kuid(&init_user_ns, current_fsuid()))
+               || uid_eq(uid, current_fsuid())
+               || unlikely(!proc_stats_readall_limited)
+               || unlikely(uid_eq(current_fsuid(), xt_qtaguid_ctrl_file->uid));
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+                                 enum ifs_tx_rx direction,
+                                 enum ifs_proto ifs_proto,
+                                 int bytes,
+                                 int packets)
+{
+       counters->bpc[set][direction][ifs_proto].bytes += bytes;
+       counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct tag_node *data = rb_entry(node, struct tag_node, node);
+               int result;
+               RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+                        " node=%p data=%p\n", tag, node, data);
+               result = tag_compare(tag, data->tag);
+               RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+                        " data.tag=0x%llx (uid=%u) res=%d\n",
+                        tag, data->tag, get_uid_from_tag(data->tag), result);
+               if (result < 0)
+                       node = node->rb_left;
+               else if (result > 0)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct tag_node *this = rb_entry(*new, struct tag_node,
+                                                node);
+               int result = tag_compare(data->tag, this->tag);
+               RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+                        " (uid=%u)\n", __func__,
+                        this->tag,
+                        get_uid_from_tag(this->tag));
+               parent = *new;
+               if (result < 0)
+                       new = &((*new)->rb_left);
+               else if (result > 0)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+                                       struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+                                                          tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_counter_set, tn.node);
+
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+                                            const struct sock *sk)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct sock_tag *data = rb_entry(node, struct sock_tag,
+                                                sock_node);
+               if (sk < data->sk)
+                       node = node->rb_left;
+               else if (sk > data->sk)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct sock_tag *this = rb_entry(*new, struct sock_tag,
+                                                sock_node);
+               parent = *new;
+               if (data->sk < this->sk)
+                       new = &((*new)->rb_left);
+               else if (data->sk > this->sk)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->sock_node, parent, new);
+       rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+       struct rb_node *node;
+       struct sock_tag *st_entry;
+
+       node = rb_first(st_to_free_tree);
+       while (node) {
+               st_entry = rb_entry(node, struct sock_tag, sock_node);
+               node = rb_next(node);
+               CT_DEBUG("qtaguid: %s(): "
+                        "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+                        st_entry->sk,
+                        st_entry->tag,
+                        get_uid_from_tag(st_entry->tag));
+               rb_erase(&st_entry->sock_node, st_to_free_tree);
+               sockfd_put(st_entry->socket);
+               kfree(st_entry);
+       }
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+                                                      const pid_t pid)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct proc_qtu_data *data = rb_entry(node,
+                                                     struct proc_qtu_data,
+                                                     node);
+               if (pid < data->pid)
+                       node = node->rb_left;
+               else if (pid > data->pid)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+                                     struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct proc_qtu_data *this = rb_entry(*new,
+                                                     struct proc_qtu_data,
+                                                     node);
+               parent = *new;
+               if (data->pid < this->pid)
+                       new = &((*new)->rb_left);
+               else if (data->pid > this->pid)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+                                    struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct uid_tag_data *this = rb_entry(*new,
+                                                    struct uid_tag_data,
+                                                    node);
+               parent = *new;
+               if (data->uid < this->uid)
+                       new = &((*new)->rb_left);
+               else if (data->uid > this->uid)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+                                                    uid_t uid)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct uid_tag_data *data = rb_entry(node,
+                                                    struct uid_tag_data,
+                                                    node);
+               if (uid < data->uid)
+                       node = node->rb_left;
+               else if (uid > data->uid)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failures, and lock is not held.
+ * If found_res is not NULL:
+ *   sets *found_res to true if an existing entry was found,
+ *   and to false if a new entry had to be allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+       struct uid_tag_data *utd_entry;
+
+       /* Look for top level uid_tag_data for the UID */
+       utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+       DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+       if (found_res)
+               *found_res = utd_entry;
+       if (utd_entry)
+               return utd_entry;
+
+       utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+       if (!utd_entry) {
+               pr_err("qtaguid: get_uid_data(%u): "
+                      "tag data alloc failed\n", uid);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       utd_entry->uid = uid;
+       utd_entry->tag_ref_tree = RB_ROOT;
+       uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+       DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+       return utd_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+                                  struct uid_tag_data *utd_entry)
+{
+       struct tag_ref *tr_entry;
+       int res;
+
+       if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+               pr_info("qtaguid: new_tag_ref(0x%llx): "
+                       "tag ref alloc quota exceeded. max=%d\n",
+                       new_tag, max_sock_tags);
+               res = -EMFILE;
+               goto err_res;
+
+       }
+
+       tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+       if (!tr_entry) {
+               pr_err("qtaguid: new_tag_ref(0x%llx): "
+                      "tag ref alloc failed\n",
+                      new_tag);
+               res = -ENOMEM;
+               goto err_res;
+       }
+       tr_entry->tn.tag = new_tag;
+       /* tr_entry->num_sock_tags  handled by caller */
+       utd_entry->num_active_tags++;
+       tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+       DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+                " inserted new tag ref %p\n",
+                new_tag, tr_entry);
+       return tr_entry;
+
+err_res:
+       return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+                                     struct uid_tag_data **utd_res)
+{
+       struct uid_tag_data *utd_entry;
+       struct tag_ref *tr_entry;
+       bool found_utd;
+       uid_t uid = get_uid_from_tag(full_tag);
+
+       DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+                full_tag, uid);
+
+       utd_entry = get_uid_data(uid, &found_utd);
+       if (IS_ERR_OR_NULL(utd_entry)) {
+               if (utd_res)
+                       *utd_res = utd_entry;
+               return NULL;
+       }
+
+       tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+       if (utd_res)
+               *utd_res = utd_entry;
+       DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+                full_tag, utd_entry, tr_entry);
+       return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+                                  struct uid_tag_data **utd_res)
+{
+       struct uid_tag_data *utd_entry;
+       struct tag_ref *tr_entry;
+
+       DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+                full_tag);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+       BUG_ON(IS_ERR_OR_NULL(utd_entry));
+       if (!tr_entry)
+               tr_entry = new_tag_ref(full_tag, utd_entry);
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       if (utd_res)
+               *utd_res = utd_entry;
+       DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+                full_tag, utd_entry, tr_entry);
+       return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+       /* Are we done with the UID tag data entry? */
+       if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+               !utd_entry->num_pqd) {
+               DR_DEBUG("qtaguid: %s(): "
+                        "erase utd_entry=%p uid=%u "
+                        "by pid=%u tgid=%u uid=%u\n", __func__,
+                        utd_entry, utd_entry->uid,
+                        current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+               BUG_ON(utd_entry->num_active_tags);
+               rb_erase(&utd_entry->node, &uid_tag_data_tree);
+               kfree(utd_entry);
+       } else {
+               DR_DEBUG("qtaguid: %s(): "
+                        "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+                        __func__, utd_entry, utd_entry->num_active_tags,
+                        utd_entry->num_pqd);
+               BUG_ON(!(utd_entry->num_active_tags ||
+                        utd_entry->num_pqd));
+       }
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements refcount of utd_entry, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+                                       struct uid_tag_data *utd_entry)
+{
+       DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+                tr_entry, tr_entry->tn.tag,
+                get_uid_from_tag(tr_entry->tn.tag));
+       if (!tr_entry->num_sock_tags) {
+               BUG_ON(!utd_entry->num_active_tags);
+               utd_entry->num_active_tags--;
+               rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+               DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+               kfree(tr_entry);
+       }
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+       struct rb_node *node;
+       struct tag_ref *tr_entry;
+       tag_t acct_tag;
+
+       DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+                full_tag, get_uid_from_tag(full_tag));
+       acct_tag = get_atag_from_tag(full_tag);
+       node = rb_first(&utd_entry->tag_ref_tree);
+       while (node) {
+               tr_entry = rb_entry(node, struct tag_ref, tn.node);
+               node = rb_next(node);
+               if (!acct_tag || tr_entry->tn.tag == full_tag)
+                       free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+       }
+}
+
+static ssize_t read_proc_u64(struct file *file, char __user *buf,
+                        size_t size, loff_t *ppos)
+{
+       uint64_t *valuep = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", *valuep);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t read_proc_bool(struct file *file, char __user *buf,
+                         size_t size, loff_t *ppos)
+{
+       bool *valuep = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%u\n", *valuep);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static int get_active_counter_set(tag_t tag)
+{
+       int active_set = 0;
+       struct tag_counter_set *tcs;
+
+       MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+                " (uid=%u)\n",
+                tag, get_uid_from_tag(tag));
+       /* For now we only handle UID tags for active sets */
+       tag = get_utag_from_tag(tag);
+       spin_lock_bh(&tag_counter_set_list_lock);
+       tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (tcs)
+               active_set = tcs->active_set;
+       spin_unlock_bh(&tag_counter_set_list_lock);
+       return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+       struct iface_stat *iface_entry;
+
+       /* Find the entry for tracking the specified tag within the interface */
+       if (ifname == NULL) {
+               pr_info("qtaguid: iface_stat: get() NULL device name\n");
+               return NULL;
+       }
+
+       /* Iterate over interfaces */
+       list_for_each_entry(iface_entry, &iface_stat_list, list) {
+               if (!strcmp(ifname, iface_entry->ifname))
+                       goto done;
+       }
+       iface_entry = NULL;
+done:
+       return iface_entry;
+}
+
+/* This is for fmt2 only */
+static void pp_iface_stat_header(struct seq_file *m)
+{
+       seq_puts(m,
+                "ifname "
+                "total_skb_rx_bytes total_skb_rx_packets "
+                "total_skb_tx_bytes total_skb_tx_packets "
+                "rx_tcp_bytes rx_tcp_packets "
+                "rx_udp_bytes rx_udp_packets "
+                "rx_other_bytes rx_other_packets "
+                "tx_tcp_bytes tx_tcp_packets "
+                "tx_udp_bytes tx_udp_packets "
+                "tx_other_bytes tx_other_packets\n"
+       );
+}
+
+static void pp_iface_stat_line(struct seq_file *m,
+                              struct iface_stat *iface_entry)
+{
+       struct data_counters *cnts;
+       int cnt_set = 0;   /* We only use one set for the device */
+       cnts = &iface_entry->totals_via_skb;
+       seq_printf(m, "%s %llu %llu %llu %llu %llu %llu %llu %llu "
+                  "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+                  iface_entry->ifname,
+                  dc_sum_bytes(cnts, cnt_set, IFS_RX),
+                  dc_sum_packets(cnts, cnt_set, IFS_RX),
+                  dc_sum_bytes(cnts, cnt_set, IFS_TX),
+                  dc_sum_packets(cnts, cnt_set, IFS_TX),
+                  cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+}
+
+struct proc_iface_stat_fmt_info {
+       int fmt;
+};
+
+static void *iface_stat_fmt_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_iface_stat_fmt_info *p = m->private;
+       loff_t n = *pos;
+
+       /*
+        * This lock will prevent iface_stat_update() from changing active,
+        * and in turn prevent an interface from unregistering itself.
+        */
+       spin_lock_bh(&iface_stat_list_lock);
+
+       if (unlikely(module_passive))
+               return NULL;
+
+       if (!n && p->fmt == 2)
+               pp_iface_stat_header(m);
+
+       return seq_list_start(&iface_stat_list, n);
+}
+
+static void *iface_stat_fmt_proc_next(struct seq_file *m, void *p, loff_t *pos)
+{
+       return seq_list_next(p, &iface_stat_list, pos);
+}
+
+static void iface_stat_fmt_proc_stop(struct seq_file *m, void *p)
+{
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_stat_fmt_proc_show(struct seq_file *m, void *v)
+{
+       struct proc_iface_stat_fmt_info *p = m->private;
+       struct iface_stat *iface_entry;
+       struct rtnl_link_stats64 dev_stats, *stats;
+       struct rtnl_link_stats64 no_dev_stats = {0};
+
+
+       CT_DEBUG("qtaguid:proc iface_stat_fmt pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+       iface_entry = list_entry(v, struct iface_stat, list);
+
+       if (iface_entry->active) {
+               stats = dev_get_stats(iface_entry->net_dev,
+                                     &dev_stats);
+       } else {
+               stats = &no_dev_stats;
+       }
+       /*
+        * If the meaning of the data changes, then update the fmtX
+        * string.
+        */
+       if (p->fmt == 1) {
+               seq_printf(m, "%s %d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+                          iface_entry->ifname,
+                          iface_entry->active,
+                          iface_entry->totals_via_dev[IFS_RX].bytes,
+                          iface_entry->totals_via_dev[IFS_RX].packets,
+                          iface_entry->totals_via_dev[IFS_TX].bytes,
+                          iface_entry->totals_via_dev[IFS_TX].packets,
+                          stats->rx_bytes, stats->rx_packets,
+                          stats->tx_bytes, stats->tx_packets
+                          );
+       } else {
+               pp_iface_stat_line(m, iface_entry);
+       }
+       return 0;
+}
+
+static const struct file_operations read_u64_fops = {
+       .read           = read_proc_u64,
+       .llseek         = default_llseek,
+};
+
+static const struct file_operations read_bool_fops = {
+       .read           = read_proc_bool,
+       .llseek         = default_llseek,
+};
+
+static void iface_create_proc_worker(struct work_struct *work)
+{
+       struct proc_dir_entry *proc_entry;
+       struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+                                                  iface_work);
+       struct iface_stat *new_iface  = isw->iface_entry;
+
+       /* iface_entries are not deleted, so safe to manipulate. */
+       proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+       if (IS_ERR_OR_NULL(proc_entry)) {
+               pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+               kfree(isw);
+               return;
+       }
+
+       new_iface->proc_ptr = proc_entry;
+
+       proc_create_data("tx_bytes", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_TX].bytes);
+       proc_create_data("rx_bytes", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_RX].bytes);
+       proc_create_data("tx_packets", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_TX].packets);
+       proc_create_data("rx_packets", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_RX].packets);
+       proc_create_data("active", proc_iface_perms, proc_entry,
+                        &read_bool_fops, &new_iface->active);
+
+       IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+                "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+       kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+                                  struct net_device *net_dev,
+                                  bool activate)
+{
+       if (activate) {
+               entry->net_dev = net_dev;
+               entry->active = true;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "enable tracking. rfcnt=%d\n", __func__,
+                        entry->ifname,
+                        __this_cpu_read(*net_dev->pcpu_refcnt));
+       } else {
+               entry->active = false;
+               entry->net_dev = NULL;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "disable tracking. rfcnt=%d\n", __func__,
+                        entry->ifname,
+                        __this_cpu_read(*net_dev->pcpu_refcnt));
+
+       }
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+       struct iface_stat *new_iface;
+       struct iface_stat_work *isw;
+
+       new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+       if (new_iface == NULL) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "iface_stat alloc failed\n", net_dev->name);
+               return NULL;
+       }
+       new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+       if (new_iface->ifname == NULL) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "ifname alloc failed\n", net_dev->name);
+               kfree(new_iface);
+               return NULL;
+       }
+       spin_lock_init(&new_iface->tag_stat_list_lock);
+       new_iface->tag_stat_tree = RB_ROOT;
+       _iface_stat_set_active(new_iface, net_dev, true);
+
+       /*
+        * ipv6 notifier chains are atomic :(
+        * No create_proc_read_entry() for you!
+        */
+       isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+       if (!isw) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "work alloc failed\n", new_iface->ifname);
+               _iface_stat_set_active(new_iface, net_dev, false);
+               kfree(new_iface->ifname);
+               kfree(new_iface);
+               return NULL;
+       }
+       isw->iface_entry = new_iface;
+       INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+       schedule_work(&isw->iface_work);
+       list_add(&new_iface->list, &iface_stat_list);
+       return new_iface;
+}
+
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+                                              struct iface_stat *iface)
+{
+       struct rtnl_link_stats64 dev_stats, *stats;
+       bool stats_rewound;
+
+       stats = dev_get_stats(net_dev, &dev_stats);
+       /* No empty packets */
+       stats_rewound =
+               (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+               || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+       IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+                "bytes rx/tx=%llu/%llu "
+                "active=%d last_known=%d "
+                "stats_rewound=%d\n", __func__,
+                net_dev ? net_dev->name : "?",
+                iface, net_dev,
+                stats->rx_bytes, stats->tx_bytes,
+                iface->active, iface->last_known_valid, stats_rewound);
+
+       if (iface->active && iface->last_known_valid && stats_rewound) {
+               pr_warn_once("qtaguid: iface_stat: %s(%s): "
+                            "iface reset its stats unexpectedly\n", __func__,
+                            net_dev->name);
+
+               iface->totals_via_dev[IFS_TX].bytes +=
+                       iface->last_known[IFS_TX].bytes;
+               iface->totals_via_dev[IFS_TX].packets +=
+                       iface->last_known[IFS_TX].packets;
+               iface->totals_via_dev[IFS_RX].bytes +=
+                       iface->last_known[IFS_RX].bytes;
+               iface->totals_via_dev[IFS_RX].packets +=
+                       iface->last_known[IFS_RX].packets;
+               iface->last_known_valid = false;
+               IF_DEBUG("qtaguid: %s(%s): iface=%p "
+                        "used last known bytes rx/tx=%llu/%llu\n", __func__,
+                        iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+                        iface->last_known[IFS_TX].bytes);
+       }
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+                             struct in_ifaddr *ifa)
+{
+       struct in_device *in_dev = NULL;
+       const char *ifname;
+       struct iface_stat *entry;
+       __be32 ipaddr = 0;
+       struct iface_stat *new_iface;
+
+       IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+                net_dev ? net_dev->name : "?",
+                ifa, net_dev);
+       if (!net_dev) {
+               pr_err("qtaguid: iface_stat: create(): no net dev\n");
+               return;
+       }
+
+       ifname = net_dev->name;
+       if (!ifa) {
+               in_dev = in_dev_get(net_dev);
+               if (!in_dev) {
+                       pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+                              ifname);
+                       return;
+               }
+               IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+                        ifname, in_dev);
+               for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+                       IF_DEBUG("qtaguid: iface_stat: create(%s): "
+                                "ifa=%p ifa_label=%s\n",
+                                ifname, ifa,
+                                ifa->ifa_label ? ifa->ifa_label : "(null)");
+                       if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+                               break;
+               }
+       }
+
+       if (!ifa) {
+               IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+                        ifname);
+               goto done_put;
+       }
+       ipaddr = ifa->ifa_local;
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(ifname);
+       if (entry != NULL) {
+               IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+                        ifname, entry);
+               iface_check_stats_reset_and_adjust(net_dev, entry);
+               _iface_stat_set_active(entry, net_dev, true);
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "tracking now %d on ip=%pI4\n", __func__,
+                        entry->ifname, true, &ipaddr);
+               goto done_unlock_put;
+       }
+
+       new_iface = iface_alloc(net_dev);
+       IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+                "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+       spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+       if (in_dev)
+               in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+                                  struct inet6_ifaddr *ifa)
+{
+       struct in_device *in_dev;
+       const char *ifname;
+       struct iface_stat *entry;
+       struct iface_stat *new_iface;
+       int addr_type;
+
+       IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+                ifa, net_dev, net_dev ? net_dev->name : "");
+       if (!net_dev) {
+               pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+               return;
+       }
+       ifname = net_dev->name;
+
+       in_dev = in_dev_get(net_dev);
+       if (!in_dev) {
+               pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+                      ifname);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+                ifname, in_dev);
+
+       if (!ifa) {
+               IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+                        ifname);
+               goto done_put;
+       }
+       addr_type = ipv6_addr_type(&ifa->addr);
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(ifname);
+       if (entry != NULL) {
+               IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                        ifname, entry);
+               iface_check_stats_reset_and_adjust(net_dev, entry);
+               _iface_stat_set_active(entry, net_dev, true);
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "tracking now %d on ip=%pI6c\n", __func__,
+                        entry->ifname, true, &ifa->addr);
+               goto done_unlock_put;
+       }
+
+       new_iface = iface_alloc(net_dev);
+       IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+                "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+       spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+       in_dev_put(in_dev);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+       MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+       return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+       struct sock_tag *sock_tag_entry;
+       MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+       if (!sk)
+               return NULL;
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(sk);
+       spin_unlock_bh(&sock_tag_list_lock);
+       return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+                    struct xt_action_param *par)
+{
+       int thoff = 0, tproto;
+
+       switch (par->family) {
+       case NFPROTO_IPV6:
+               tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
+               if (tproto < 0)
+                       MT_DEBUG("%s(): transport header not found in ipv6"
+                                " skb=%p\n", __func__, skb);
+               break;
+       case NFPROTO_IPV4:
+               tproto = ip_hdr(skb)->protocol;
+               break;
+       default:
+               tproto = IPPROTO_RAW;
+       }
+       return tproto;
+}
+
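+/*
+ * Bucket the bytes (and one packet) into the TCP, UDP or OTHER protocol
+ * counter for the given counter set and direction.
+ */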
+static void
+data_counters_update(struct data_counters *dc, int set,
+                    enum ifs_tx_rx direction, int proto, int bytes)
+{
+       switch (proto) {
+       case IPPROTO_TCP:
+               dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+               break;
+       case IPPROTO_UDP:
+               dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+               break;
+       case IPPROTO_IP:
+       default:
+               dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+                                   1);
+               break;
+       }
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is brought down or unregistered: with stash_only
+ * the current dev stats are only remembered in last_known[], otherwise
+ * they are folded into totals_via_dev[] and tracking is deactivated.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+       struct rtnl_link_stats64 dev_stats, *stats;
+       struct iface_stat *entry;
+
+       stats = dev_get_stats(net_dev, &dev_stats);
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(net_dev->name);
+       if (entry == NULL) {
+               IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+                        net_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                net_dev->name, entry);
+       if (!entry->active) {
+               IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+                        net_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       if (stash_only) {
+               entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+               entry->last_known[IFS_TX].packets = stats->tx_packets;
+               entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+               entry->last_known[IFS_RX].packets = stats->rx_packets;
+               entry->last_known_valid = true;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+                        net_dev->name, stats->rx_bytes, stats->tx_bytes);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+       entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+       entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+       entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+       entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+       /* We don't need the last_known[] anymore */
+       entry->last_known_valid = false;
+       _iface_stat_set_active(entry, net_dev, false);
+       IF_DEBUG("qtaguid: %s(%s): "
+                "disable tracking. rx/tx=%llu/%llu\n", __func__,
+                net_dev->name, stats->rx_bytes, stats->tx_bytes);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry does not exist (when a device was never
+ * configured with an IP address).
+ * Called on each skb seen in the pre/post routing hooks.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+                                      struct xt_action_param *par)
+{
+       struct iface_stat *entry;
+       const struct net_device *el_dev;
+       enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+       int bytes = skb->len;
+       int proto;
+
+       if (!skb->dev) {
+               MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+               el_dev = par->in ? : par->out;
+       } else {
+               const struct net_device *other_dev;
+               el_dev = skb->dev;
+               other_dev = par->in ? : par->out;
+               if (el_dev != other_dev) {
+                       MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+                                "par->(in/out)=%p %s\n",
+                                par->hooknum, el_dev, el_dev->name, other_dev,
+                                other_dev->name);
+               }
+       }
+
+       if (unlikely(!el_dev)) {
+               pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
+                                  par->hooknum, __func__);
+               BUG();
+       } else if (unlikely(!el_dev->name)) {
+               pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
+                                  par->hooknum, __func__);
+               BUG();
+       } else {
+               proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
+       }
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(el_dev->name);
+       if (entry == NULL) {
+               IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+                        __func__, el_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                el_dev->name, entry);
+
+       data_counters_update(&entry->totals_via_skb, 0, direction, proto,
+                            bytes);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
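+/*
+ * Credit the bytes to the tag's own counters in its currently active counter
+ * set and, when parent counters are linked (the {0, uid_tag} entry), to the
+ * parent counters as well.
+ */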
+static void tag_stat_update(struct tag_stat *tag_entry,
+                       enum ifs_tx_rx direction, int proto, int bytes)
+{
+       int active_set;
+       active_set = get_active_counter_set(tag_entry->tn.tag);
+       MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+                "dir=%d proto=%d bytes=%d)\n",
+                tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+                active_set, direction, proto, bytes);
+       data_counters_update(&tag_entry->counters, active_set, direction,
+                            proto, bytes);
+       if (tag_entry->parent_counters)
+               data_counters_update(tag_entry->parent_counters, active_set,
+                                    direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+                                          tag_t tag)
+{
+       struct tag_stat *new_tag_stat_entry = NULL;
+       IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+                " (uid=%u)\n", __func__,
+                iface_entry, tag, get_uid_from_tag(tag));
+       new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+       if (!new_tag_stat_entry) {
+               pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+               goto done;
+       }
+       new_tag_stat_entry->tn.tag = tag;
+       tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+       return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+                              const struct sock *sk, enum ifs_tx_rx direction,
+                              int proto, int bytes)
+{
+       struct tag_stat *tag_stat_entry;
+       tag_t tag, acct_tag;
+       tag_t uid_tag;
+       struct data_counters *uid_tag_counters;
+       struct sock_tag *sock_tag_entry;
+       struct iface_stat *iface_entry;
+       struct tag_stat *new_tag_stat = NULL;
+       MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+               "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+                ifname, uid, sk, direction, proto, bytes);
+
+       spin_lock_bh(&iface_stat_list_lock);
+       iface_entry = get_iface_entry(ifname);
+       if (!iface_entry) {
+               pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+                                  "%s not found\n", ifname);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+       /* It is ok to process data when an iface_entry is inactive */
+
+       MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+                ifname, iface_entry);
+
+       /*
+        * Look for a tagged sock.
+        * Its tag carries both the acct_tag and the uid_tag.
+        */
+       sock_tag_entry = get_sock_stat(sk);
+       if (sock_tag_entry) {
+               tag = sock_tag_entry->tag;
+               acct_tag = get_atag_from_tag(tag);
+               uid_tag = get_utag_from_tag(tag);
+       } else {
+               acct_tag = make_atag_from_value(0);
+               tag = combine_atag_with_uid(acct_tag, uid);
+               uid_tag = make_tag_from_uid(uid);
+       }
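+       /*
+        * Note: combine_atag_with_uid() packs the acct_tag and the uid into a
+        * single tag_t, so with acct_tag == 0 the resulting "tag" is just the
+        * plain uid_tag used below.
+        */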
+       MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+                " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+                tag, get_uid_from_tag(tag), iface_entry);
+       /* Loop over tag list under this interface for {acct_tag,uid_tag} */
+       spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+       tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+                                             tag);
+       if (tag_stat_entry) {
+               /*
+                * Updating the {acct_tag, uid_tag} entry handles both stats:
+                * {0, uid_tag} will also get updated.
+                */
+               tag_stat_update(tag_stat_entry, direction, proto, bytes);
+               goto unlock;
+       }
+
+       /* Loop over tag list under this interface for {0,uid_tag} */
+       tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+                                             uid_tag);
+       if (!tag_stat_entry) {
+               /* Here: the base uid_tag did not exist */
+               /*
+                * No parent counters. So
+                *  - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
+                */
+               new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+               if (!new_tag_stat)
+                       goto unlock;
+               uid_tag_counters = &new_tag_stat->counters;
+       } else {
+               uid_tag_counters = &tag_stat_entry->counters;
+       }
+
+       if (acct_tag) {
+               /* Create the child {acct_tag, uid_tag} and hook up parent. */
+               new_tag_stat = create_if_tag_stat(iface_entry, tag);
+               if (!new_tag_stat)
+                       goto unlock;
+               new_tag_stat->parent_counters = uid_tag_counters;
+       } else {
+               /*
+                * For new_tag_stat to be still NULL here would require:
+                *  {0, uid_tag} exists
+                *  and {acct_tag, uid_tag} doesn't exist
+                *  AND acct_tag == 0.
+                * Impossible. This reassures us that new_tag_stat
+                * below will always be assigned.
+                */
+               BUG_ON(!new_tag_stat);
+       }
+       tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
+       spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+                                     unsigned long event, void *ptr) {
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+                "ev=0x%lx/%s netdev=%p->name=%s\n",
+                event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+       switch (event) {
+       case NETDEV_UP:
+               iface_stat_create(dev, NULL);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+                                        unsigned long event, void *ptr)
+{
+       struct inet6_ifaddr *ifa = ptr;
+       struct net_device *dev;
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+                "ev=0x%lx/%s ifa=%p\n",
+                event, netdev_evt_str(event), ifa);
+
+       switch (event) {
+       case NETDEV_UP:
+               BUG_ON(!ifa || !ifa->idev);
+               dev = (struct net_device *)ifa->idev->dev;
+               iface_stat_create_ipv6(dev, ifa);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               BUG_ON(!ifa || !ifa->idev);
+               dev = (struct net_device *)ifa->idev->dev;
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+                                       unsigned long event, void *ptr)
+{
+       struct in_ifaddr *ifa = ptr;
+       struct net_device *dev;
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+                "ev=0x%lx/%s ifa=%p\n",
+                event, netdev_evt_str(event), ifa);
+
+       switch (event) {
+       case NETDEV_UP:
+               BUG_ON(!ifa || !ifa->ifa_dev);
+               dev = ifa->ifa_dev->dev;
+               iface_stat_create(dev, ifa);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               BUG_ON(!ifa || !ifa->ifa_dev);
+               dev = ifa->ifa_dev->dev;
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+       .notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+       .notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+       .notifier_call = iface_inet6addr_event_handler,
+};
+
+static const struct seq_operations iface_stat_fmt_proc_seq_ops = {
+       .start  = iface_stat_fmt_proc_start,
+       .next   = iface_stat_fmt_proc_next,
+       .stop   = iface_stat_fmt_proc_stop,
+       .show   = iface_stat_fmt_proc_show,
+};
+
+static int proc_iface_stat_fmt_open(struct inode *inode, struct file *file)
+{
+       struct proc_iface_stat_fmt_info *s;
+
+       s = __seq_open_private(file, &iface_stat_fmt_proc_seq_ops,
+                       sizeof(struct proc_iface_stat_fmt_info));
+       if (!s)
+               return -ENOMEM;
+
+       s->fmt = (uintptr_t)PDE_DATA(inode);
+       return 0;
+}
+
+static const struct file_operations proc_iface_stat_fmt_fops = {
+       .open           = proc_iface_stat_fmt_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
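+/*
+ * Create the iface_stat proc directory and the stat_all/stat_fmt proc
+ * entries, then register the netdev, inetaddr and inet6addr notifiers.
+ * On failure the steps already completed are unwound in reverse order.
+ */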
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+       int err;
+
+       iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+       if (!iface_stat_procdir) {
+               pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+               err = -1;
+               goto err;
+       }
+
+       iface_stat_all_procfile = proc_create_data(iface_stat_all_procfilename,
+                                                  proc_iface_perms,
+                                                  parent_procdir,
+                                                  &proc_iface_stat_fmt_fops,
+                                                  (void *)1 /* fmt1 */);
+       if (!iface_stat_all_procfile) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to create iface_stat_all proc entry\n");
+               err = -1;
+               goto err_zap_entry;
+       }
+
+       iface_stat_fmt_procfile = proc_create_data(iface_stat_fmt_procfilename,
+                                                  proc_iface_perms,
+                                                  parent_procdir,
+                                                  &proc_iface_stat_fmt_fops,
+                                                  (void *)2 /* fmt2 */);
+       if (!iface_stat_fmt_procfile) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to create iface_stat_fmt proc entry\n");
+               err = -1;
+               goto err_zap_all_stats_entry;
+       }
+
+       err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register dev event handler\n");
+               goto err_zap_all_stats_entries;
+       }
+       err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register ipv4 dev event handler\n");
+               goto err_unreg_nd;
+       }
+
+       err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register ipv6 dev event handler\n");
+               goto err_unreg_ip4_addr;
+       }
+       return 0;
+
+err_unreg_ip4_addr:
+       unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+       unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+       remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+       remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+       remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+       return err;
+}
+
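+/*
+ * Resolve the full socket owning this skb via the xt_socket slow-path
+ * lookups; only attempted in the hooks xt_socket supports, and TIME_WAIT
+ * (non-full) sockets are dropped. A returned socket carries a reference the
+ * caller must release with sock_gen_put().
+ */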
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+                                   struct xt_action_param *par)
+{
+       struct sock *sk;
+       unsigned int hook_mask = (1 << par->hooknum);
+
+       MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+                par->hooknum, par->family);
+
+       /*
+        * Let's not abuse xt_socket_lookup_slow_*(), or else it will
+        * return garbage SKs.
+        */
+       if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+               return NULL;
+
+       switch (par->family) {
+       case NFPROTO_IPV6:
+               sk = xt_socket_lookup_slow_v6(dev_net(skb->dev), skb, par->in);
+               break;
+       case NFPROTO_IPV4:
+               sk = xt_socket_lookup_slow_v4(dev_net(skb->dev), skb, par->in);
+               break;
+       default:
+               return NULL;
+       }
+
+       if (sk) {
+               MT_DEBUG("qtaguid: %p->sk_proto=%u "
+                        "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+               /*
+                * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+                * "struct inet_timewait_sock" which is missing fields.
+                */
+               if (!sk_fullsock(sk) || sk->sk_state == TCP_TIME_WAIT) {
+                       sock_gen_put(sk);
+                       sk = NULL;
+               }
+       }
+       return sk;
+}
+
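+/*
+ * Attribute the skb's bytes to the given uid on the device it traversed,
+ * preferring skb->dev and falling back to par->in/out (the same device
+ * selection as in iface_stat_update_from_skb() above).
+ */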
+static void account_for_uid(const struct sk_buff *skb,
+                           const struct sock *alternate_sk, uid_t uid,
+                           struct xt_action_param *par)
+{
+       const struct net_device *el_dev;
+
+       if (!skb->dev) {
+               MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+               el_dev = par->in ? : par->out;
+       } else {
+               const struct net_device *other_dev;
+               el_dev = skb->dev;
+               other_dev = par->in ? : par->out;
+               if (el_dev != other_dev) {
+                       MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+                               "par->(in/out)=%p %s\n",
+                               par->hooknum, el_dev, el_dev->name, other_dev,
+                               other_dev->name);
+               }
+       }
+
+       if (unlikely(!el_dev)) {
+               pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+       } else if (unlikely(!el_dev->name)) {
+               pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+       } else {
+               int proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
+
+               if_tag_stat_update(el_dev->name, uid,
+                               skb->sk ? skb->sk : alternate_sk,
+                               par->in ? IFS_RX : IFS_TX,
+                               proto, skb->len);
+       }
+}
+
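+/*
+ * The match entry point: in the pre/post routing hooks only per-iface stats
+ * are updated; in the other hooks the owning socket is resolved (from the
+ * skb or via qtaguid_find_sk()), traffic is accounted to its uid/tag where
+ * possible, and the uid/gid range checks requested by the rule decide the
+ * match result.
+ */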
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_qtaguid_match_info *info = par->matchinfo;
+       const struct file *filp;
+       bool got_sock = false;
+       struct sock *sk;
+       kuid_t sock_uid;
+       bool res;
+       bool set_sk_callback_lock = false;
+
+       if (unlikely(module_passive))
+               return (info->match ^ info->invert) == 0;
+
+       MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+                par->hooknum, skb, par->in, par->out, par->family);
+
+       atomic64_inc(&qtu_events.match_calls);
+       if (skb == NULL) {
+               res = (info->match ^ info->invert) == 0;
+               goto ret_res;
+       }
+
+       switch (par->hooknum) {
+       case NF_INET_PRE_ROUTING:
+       case NF_INET_POST_ROUTING:
+               atomic64_inc(&qtu_events.match_calls_prepost);
+               iface_stat_update_from_skb(skb, par);
+               /*
+                * We are done in pre/post. The skb will get processed
+                * further later.
+                */
+               res = (info->match ^ info->invert);
+               goto ret_res;
+               break;
+       /* default: Fall through and do UID related work */
+       }
+
+       sk = skb_to_full_sk(skb);
+       /*
+        * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+        * "struct inet_timewait_sock" which is missing fields.
+        * So we ignore it.
+        */
+       if (sk && sk->sk_state == TCP_TIME_WAIT)
+               sk = NULL;
+       if (sk == NULL) {
+               /*
+                * A missing sk->sk_socket happens when packets are in-flight
+                * and the matching socket is already closed and gone.
+                */
+               sk = qtaguid_find_sk(skb, par);
+               /*
+                * If we got the socket from qtaguid_find_sk(), we will need
+                * to put it back, as the xt_socket lookup took a reference.
+                */
+               got_sock = sk;
+               if (sk)
+                       atomic64_inc(&qtu_events.match_found_sk_in_ct);
+               else
+                       atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+       } else {
+               atomic64_inc(&qtu_events.match_found_sk);
+       }
+       MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+                par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
+       if (sk != NULL) {
+               set_sk_callback_lock = true;
+               read_lock_bh(&sk->sk_callback_lock);
+               MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+                       par->hooknum, sk, sk->sk_socket,
+                       sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+               filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+               MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+                       par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+       }
+
+       if (sk == NULL || sk->sk_socket == NULL) {
+               /*
+                * Here, the qtaguid_find_sk() using connection tracking
+                * couldn't find the owner, so for now we just count them
+                * against the system.
+                */
+               /*
+                * TODO: unhack how to force just accounting.
+                * For now we only do iface stats when the uid-owner is not
+                * requested.
+                */
+               if (!(info->match & XT_QTAGUID_UID))
+                       account_for_uid(skb, sk, 0, par);
+               MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+                       par->hooknum,
+                       sk ? sk->sk_socket : NULL);
+               res = (info->match ^ info->invert) == 0;
+               atomic64_inc(&qtu_events.match_no_sk);
+               goto put_sock_ret_res;
+       } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+               res = false;
+               goto put_sock_ret_res;
+       }
+       filp = sk->sk_socket->file;
+       if (filp == NULL) {
+               MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+               account_for_uid(skb, sk, 0, par);
+               res = ((info->match ^ info->invert) &
+                       (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+               atomic64_inc(&qtu_events.match_no_sk_file);
+               goto put_sock_ret_res;
+       }
+       sock_uid = filp->f_cred->fsuid;
+       /*
+        * TODO: unhack how to force just accounting.
+        * For now we only do iface stats when the uid-owner is not requested
+        */
+       if (!(info->match & XT_QTAGUID_UID))
+               account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+
+       /*
+        * The following two tests fail the match when:
+        *    id not in range AND no inverted condition requested
+        * or id     in range AND    inverted condition requested
+        * Thus (!a && b) || (a && !b) == a ^ b
+        */
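+       /*
+        * Spelled out (a = id in range, inv = inverted condition requested):
+        * the tests below fail the match when a == inv and pass otherwise.
+        */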
+       if (info->match & XT_QTAGUID_UID) {
+               kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
+               kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
+
+               if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+                    uid_lte(filp->f_cred->fsuid, uid_max)) ^
+                   !(info->invert & XT_QTAGUID_UID)) {
+                       MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+                                par->hooknum);
+                       res = false;
+                       goto put_sock_ret_res;
+               }
+       }
+       if (info->match & XT_QTAGUID_GID) {
+               kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+               kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+
+               if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
+                               gid_lte(filp->f_cred->fsgid, gid_max)) ^
+                       !(info->invert & XT_QTAGUID_GID)) {
+                       MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+                               par->hooknum);
+                       res = false;
+                       goto put_sock_ret_res;
+               }
+       }
+       MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+       res = true;
+
+put_sock_ret_res:
+       if (got_sock)
+               sock_gen_put(sk);
+       if (set_sk_callback_lock)
+               read_unlock_bh(&sk->sk_callback_lock);
+ret_res:
+       MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+       return res;
+}
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of lock visibility */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+       va_list args;
+       char *fmt_buff;
+       char *buff;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       fmt_buff = kasprintf(GFP_ATOMIC,
+                            "qtaguid: %s(): %s {\n", __func__, fmt);
+       BUG_ON(!fmt_buff);
+       va_start(args, fmt);
+       buff = kvasprintf(GFP_ATOMIC,
+                         fmt_buff, args);
+       BUG_ON(!buff);
+       pr_debug("%s", buff);
+       kfree(fmt_buff);
+       kfree(buff);
+       va_end(args);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+       prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       spin_lock_bh(&iface_stat_list_lock);
+       prdebug_iface_stat_list(indent_level, &iface_stat_list);
+       spin_unlock_bh(&iface_stat_list_lock);
+
+       pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+struct proc_ctrl_print_info {
+       struct sock *sk; /* socket found by reading to sk_pos */
+       loff_t sk_pos;
+};
+
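+/*
+ * The ctrl seq_file iterator remembers the last socket and its position so
+ * that when seq_read() restarts the walk, start() can re-look the socket up
+ * in the (possibly changed) tree and resume, falling back to SEQ_START_TOKEN
+ * if it is gone.
+ */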
+static void *qtaguid_ctrl_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct proc_ctrl_print_info *pcpi = m->private;
+       struct sock_tag *sock_tag_entry = v;
+       struct rb_node *node;
+
+       (*pos)++;
+
+       if (!v || v == SEQ_START_TOKEN)
+               return NULL;
+
+       node = rb_next(&sock_tag_entry->sock_node);
+       if (!node) {
+               pcpi->sk = NULL;
+               sock_tag_entry = SEQ_START_TOKEN;
+       } else {
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               pcpi->sk = sock_tag_entry->sk;
+       }
+       pcpi->sk_pos = *pos;
+       return sock_tag_entry;
+}
+
+static void *qtaguid_ctrl_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_ctrl_print_info *pcpi = m->private;
+       struct sock_tag *sock_tag_entry;
+       struct rb_node *node;
+
+       spin_lock_bh(&sock_tag_list_lock);
+
+       if (unlikely(module_passive))
+               return NULL;
+
+       if (*pos == 0) {
+               pcpi->sk_pos = 0;
+               node = rb_first(&sock_tag_tree);
+               if (!node) {
+                       pcpi->sk = NULL;
+                       return SEQ_START_TOKEN;
+               }
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               pcpi->sk = sock_tag_entry->sk;
+       } else {
+               sock_tag_entry = (pcpi->sk ? get_sock_stat_nl(pcpi->sk) :
+                                               NULL) ?: SEQ_START_TOKEN;
+               if (*pos != pcpi->sk_pos) {
+                       /* seq_read skipped a next call */
+                       *pos = pcpi->sk_pos;
+                       return qtaguid_ctrl_proc_next(m, sock_tag_entry, pos);
+               }
+       }
+       return sock_tag_entry;
+}
+
+static void qtaguid_ctrl_proc_stop(struct seq_file *m, void *v)
+{
+       spin_unlock_bh(&sock_tag_list_lock);
+}
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
+{
+       struct sock_tag *sock_tag_entry = v;
+       uid_t uid;
+       long f_count;
+
+       CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+       if (sock_tag_entry != SEQ_START_TOKEN) {
+               uid = get_uid_from_tag(sock_tag_entry->tag);
+               CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+                        "pid=%u\n",
+                        sock_tag_entry->sk,
+                        sock_tag_entry->tag,
+                        uid,
+                        sock_tag_entry->pid
+                       );
+               f_count = atomic_long_read(
+                       &sock_tag_entry->socket->file->f_count);
+               seq_printf(m, "sock=%pK tag=0x%llx (uid=%u) pid=%u "
+                          "f_count=%lu\n",
+                          sock_tag_entry->sk,
+                          sock_tag_entry->tag, uid,
+                          sock_tag_entry->pid, f_count);
+       } else {
+               seq_printf(m, "events: sockets_tagged=%llu "
+                          "sockets_untagged=%llu "
+                          "counter_set_changes=%llu "
+                          "delete_cmds=%llu "
+                          "iface_events=%llu "
+                          "match_calls=%llu "
+                          "match_calls_prepost=%llu "
+                          "match_found_sk=%llu "
+                          "match_found_sk_in_ct=%llu "
+                          "match_found_no_sk_in_ct=%llu "
+                          "match_no_sk=%llu "
+                          "match_no_sk_file=%llu\n",
+                          (u64)atomic64_read(&qtu_events.sockets_tagged),
+                          (u64)atomic64_read(&qtu_events.sockets_untagged),
+                          (u64)atomic64_read(&qtu_events.counter_set_changes),
+                          (u64)atomic64_read(&qtu_events.delete_cmds),
+                          (u64)atomic64_read(&qtu_events.iface_events),
+                          (u64)atomic64_read(&qtu_events.match_calls),
+                          (u64)atomic64_read(&qtu_events.match_calls_prepost),
+                          (u64)atomic64_read(&qtu_events.match_found_sk),
+                          (u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
+                          (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
+                          (u64)atomic64_read(&qtu_events.match_no_sk),
+                          (u64)atomic64_read(&qtu_events.match_no_sk_file));
+
+               /* Count the following as part of the last item_index */
+               prdebug_full_state(0, "proc ctrl");
+       }
+
+       return 0;
+}
+
+/*
+ * Delete socket tags and stat tags associated with a given
+ * accounting tag and uid.
+ */
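+/*
+ * Input, per the sscanf() below: "<cmd> <acct_tag> [<uid>]"; uid defaults to
+ * the caller's fsuid, and deleting another uid's entries requires
+ * can_impersonate_uid().
+ */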
+static int ctrl_cmd_delete(const char *input)
+{
+       char cmd;
+       int uid_int;
+       kuid_t uid;
+       uid_t entry_uid;
+       tag_t acct_tag;
+       tag_t tag;
+       int res, argc;
+       struct iface_stat *iface_entry;
+       struct rb_node *node;
+       struct sock_tag *st_entry;
+       struct rb_root st_to_free_tree = RB_ROOT;
+       struct tag_stat *ts_entry;
+       struct tag_counter_set *tcs_entry;
+       struct tag_ref *tr_entry;
+       struct uid_tag_data *utd_entry;
+
+       argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid_int);
+       uid = make_kuid(&init_user_ns, uid_int);
+       CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+                "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+                acct_tag, uid_int);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       if (!valid_atag(acct_tag)) {
+               pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+               res = -EINVAL;
+               goto err;
+       }
+       if (argc < 3) {
+               uid = current_fsuid();
+               uid_int = from_kuid(&init_user_ns, uid);
+       } else if (!can_impersonate_uid(uid)) {
+               pr_info("qtaguid: ctrl_delete(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+               res = -EPERM;
+               goto err;
+       }
+
+       tag = combine_atag_with_uid(acct_tag, uid_int);
+       CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                "looking for tag=0x%llx (uid=%u)\n",
+                input, tag, uid_int);
+
+       /* Delete socket tags */
+       spin_lock_bh(&sock_tag_list_lock);
+       node = rb_first(&sock_tag_tree);
+       while (node) {
+               st_entry = rb_entry(node, struct sock_tag, sock_node);
+               entry_uid = get_uid_from_tag(st_entry->tag);
+               node = rb_next(node);
+               if (entry_uid != uid_int)
+                       continue;
+
+               CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+                        input, st_entry->tag, entry_uid);
+
+               if (!acct_tag || st_entry->tag == tag) {
+                       rb_erase(&st_entry->sock_node, &sock_tag_tree);
+                       /* Can't sockfd_put() within spinlock, do it later. */
+                       sock_tag_tree_insert(st_entry, &st_to_free_tree);
+                       tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+                       BUG_ON(tr_entry->num_sock_tags <= 0);
+                       tr_entry->num_sock_tags--;
+                       /*
+                        * TODO: remove this check and start failing.
+                        * It works around apps that never opened
+                        * /dev/xt_qtaguid: for those the entry was never
+                        * added to a pqd sock_tag_list (see the
+                        * IS_ERR_OR_NULL(pqd_entry) handling), so its list
+                        * linkage may be unset here.
+                        */
+                       if (st_entry->list.next && st_entry->list.prev)
+                               list_del(&st_entry->list);
+               }
+       }
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       sock_tag_tree_erase(&st_to_free_tree);
+
+       /* Delete tag counter-sets */
+       spin_lock_bh(&tag_counter_set_list_lock);
+       /* Counter sets are only on the uid tag, not full tag */
+       tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (tcs_entry) {
+               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                        "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+                        input,
+                        tcs_entry->tn.tag,
+                        get_uid_from_tag(tcs_entry->tn.tag),
+                        tcs_entry->active_set);
+               rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+               kfree(tcs_entry);
+       }
+       spin_unlock_bh(&tag_counter_set_list_lock);
+
+       /*
+        * If acct_tag is 0, then all entries belonging to uid are
+        * erased.
+        */
+       spin_lock_bh(&iface_stat_list_lock);
+       list_for_each_entry(iface_entry, &iface_stat_list, list) {
+               spin_lock_bh(&iface_entry->tag_stat_list_lock);
+               node = rb_first(&iface_entry->tag_stat_tree);
+               while (node) {
+                       ts_entry = rb_entry(node, struct tag_stat, tn.node);
+                       entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+                       node = rb_next(node);
+
+                       CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                                "ts tag=0x%llx (uid=%u)\n",
+                                input, ts_entry->tn.tag, entry_uid);
+
+                       if (entry_uid != uid_int)
+                               continue;
+                       if (!acct_tag || ts_entry->tn.tag == tag) {
+                               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                                        "erase ts: %s 0x%llx %u\n",
+                                        input, iface_entry->ifname,
+                                        get_atag_from_tag(ts_entry->tn.tag),
+                                        entry_uid);
+                               rb_erase(&ts_entry->tn.node,
+                                        &iface_entry->tag_stat_tree);
+                               kfree(ts_entry);
+                       }
+               }
+               spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+       }
+       spin_unlock_bh(&iface_stat_list_lock);
+
+       /* Cleanup the uid_tag_data */
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       node = rb_first(&uid_tag_data_tree);
+       while (node) {
+               utd_entry = rb_entry(node, struct uid_tag_data, node);
+               entry_uid = utd_entry->uid;
+               node = rb_next(node);
+
+               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                        "utd uid=%u\n",
+                        input, entry_uid);
+
+               if (entry_uid != uid_int)
+                       continue;
+               /*
+                * Go over the tag_refs, and those that don't have
+                * sock_tags using them are freed.
+                */
+               put_tag_ref_tree(tag, utd_entry);
+               put_utd_entry(utd_entry);
+       }
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+
+       atomic64_inc(&qtu_events.delete_cmds);
+       res = 0;
+
+err:
+       return res;
+}
+
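+/*
+ * Select the active counter set for a uid's tags. Input, per the sscanf()
+ * below: "<cmd> <counter_set> <uid>"; all three fields are required,
+ * counter_set must be within [0, IFS_MAX_COUNTER_SETS) and the caller needs
+ * can_manipulate_uids().
+ */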
+static int ctrl_cmd_counter_set(const char *input)
+{
+       char cmd;
+       uid_t uid = 0;
+       tag_t tag;
+       int res, argc;
+       struct tag_counter_set *tcs;
+       int counter_set;
+
+       argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+       CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+                "set=%d uid=%u\n", input, argc, cmd,
+                counter_set, uid);
+       if (argc != 3) {
+               res = -EINVAL;
+               goto err;
+       }
+       if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+               pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+                       input);
+               res = -EINVAL;
+               goto err;
+       }
+       if (!can_manipulate_uids()) {
+               pr_info("qtaguid: ctrl_counterset(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+               res = -EPERM;
+               goto err;
+       }
+
+       tag = make_tag_from_uid(uid);
+       spin_lock_bh(&tag_counter_set_list_lock);
+       tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (!tcs) {
+               tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+               if (!tcs) {
+                       spin_unlock_bh(&tag_counter_set_list_lock);
+                       pr_err("qtaguid: ctrl_counterset(%s): "
+                              "failed to alloc counter set\n",
+                              input);
+                       res = -ENOMEM;
+                       goto err;
+               }
+               tcs->tn.tag = tag;
+               tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+               CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+                        "(uid=%u) set=%d\n",
+                        input, tag, get_uid_from_tag(tag), counter_set);
+       }
+       tcs->active_set = counter_set;
+       spin_unlock_bh(&tag_counter_set_list_lock);
+       atomic64_inc(&qtu_events.counter_set_changes);
+       res = 0;
+
+err:
+       return res;
+}
+
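+/*
+ * Tag the socket referenced by sock_fd with {acct_tag, uid}. Input, per the
+ * sscanf() below: "<cmd> <sock_fd> [<acct_tag> [<uid>]]"; acct_tag defaults
+ * to 0 and uid to the caller's fsuid. The socket file reference taken here
+ * is kept until the socket is untagged.
+ */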
+static int ctrl_cmd_tag(const char *input)
+{
+       char cmd;
+       int sock_fd = 0;
+       kuid_t uid;
+       unsigned int uid_int = 0;
+       tag_t acct_tag = make_atag_from_value(0);
+       tag_t full_tag;
+       struct socket *el_socket;
+       int res, argc;
+       struct sock_tag *sock_tag_entry;
+       struct tag_ref *tag_ref_entry;
+       struct uid_tag_data *uid_tag_data_entry;
+       struct proc_qtu_data *pqd_entry;
+
+       /* Unassigned args will get defaulted later. */
+       argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid_int);
+       uid = make_kuid(&init_user_ns, uid_int);
+       CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+                "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+                acct_tag, uid_int);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+       if (!el_socket) {
+               pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       from_kuid(&init_user_ns, current_fsuid()));
+               goto err;
+       }
+       CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+                input, atomic_long_read(&el_socket->file->f_count),
+                el_socket->sk);
+       if (argc < 3) {
+               acct_tag = make_atag_from_value(0);
+       } else if (!valid_atag(acct_tag)) {
+               pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+               res = -EINVAL;
+               goto err_put;
+       }
+       CT_DEBUG("qtaguid: ctrl_tag(%s): "
+                "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+                "ctrl.gid=%u in_group()=%d in_egroup()=%d\n",
+                input, current->pid, current->tgid,
+                from_kuid(&init_user_ns, current_uid()),
+                from_kuid(&init_user_ns, current_euid()),
+                from_kuid(&init_user_ns, current_fsuid()),
+                from_kgid(&init_user_ns, xt_qtaguid_ctrl_file->gid),
+                in_group_p(xt_qtaguid_ctrl_file->gid),
+                in_egroup_p(xt_qtaguid_ctrl_file->gid));
+       if (argc < 4) {
+               uid = current_fsuid();
+               uid_int = from_kuid(&init_user_ns, uid);
+       } else if (!can_impersonate_uid(uid)) {
+               pr_info("qtaguid: ctrl_tag(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+               res = -EPERM;
+               goto err_put;
+       }
+       full_tag = combine_atag_with_uid(acct_tag, uid_int);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+       tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+       if (IS_ERR(tag_ref_entry)) {
+               res = PTR_ERR(tag_ref_entry);
+               spin_unlock_bh(&sock_tag_list_lock);
+               goto err_put;
+       }
+       tag_ref_entry->num_sock_tags++;
+       if (sock_tag_entry) {
+               struct tag_ref *prev_tag_ref_entry;
+
+               CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+                        "st@%p ...->f_count=%ld\n",
+                        input, el_socket->sk, sock_tag_entry,
+                        atomic_long_read(&el_socket->file->f_count));
+               /*
+                * This is a re-tagging, so release the sock_fd that was
+                * locked at the time of the 1st tagging.
+                * There is still the ref from this call's sockfd_lookup() so
+                * it can be done within the spinlock.
+                */
+               sockfd_put(sock_tag_entry->socket);
+               prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+                                                   &uid_tag_data_entry);
+               BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+               BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+               prev_tag_ref_entry->num_sock_tags--;
+               sock_tag_entry->tag = full_tag;
+       } else {
+               CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+                        input, el_socket->sk);
+               sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+                                        GFP_ATOMIC);
+               if (!sock_tag_entry) {
+                       pr_err("qtaguid: ctrl_tag(%s): "
+                              "socket tag alloc failed\n",
+                              input);
+                       spin_unlock_bh(&sock_tag_list_lock);
+                       res = -ENOMEM;
+                       goto err_tag_unref_put;
+               }
+               sock_tag_entry->sk = el_socket->sk;
+               sock_tag_entry->socket = el_socket;
+               sock_tag_entry->pid = current->tgid;
+               sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int);
+               spin_lock_bh(&uid_tag_data_tree_lock);
+               pqd_entry = proc_qtu_data_tree_search(
+                       &proc_qtu_data_tree, current->tgid);
+               /*
+                * TODO: remove this check and start failing.
+                * For now we only warn, to catch user-space code that is not
+                * opening /dev/xt_qtaguid.
+                */
+               if (IS_ERR_OR_NULL(pqd_entry))
+                       pr_warn_once(
+                               "qtaguid: %s(): "
+                               "User space forgot to open /dev/xt_qtaguid? "
+                               "pid=%u tgid=%u uid=%u\n", __func__,
+                               current->pid, current->tgid,
+                               from_kuid(&init_user_ns, current_fsuid()));
+               else
+                       list_add(&sock_tag_entry->list,
+                                &pqd_entry->sock_tag_list);
+               spin_unlock_bh(&uid_tag_data_tree_lock);
+
+               sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+               atomic64_inc(&qtu_events.sockets_tagged);
+       }
+       spin_unlock_bh(&sock_tag_list_lock);
+       /* We keep the ref to the socket (file) until it is untagged */
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+                input, sock_tag_entry,
+                atomic_long_read(&el_socket->file->f_count));
+       return 0;
+
+err_tag_unref_put:
+       BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+       tag_ref_entry->num_sock_tags--;
+       free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+                input, atomic_long_read(&el_socket->file->f_count) - 1);
+       /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+       sockfd_put(el_socket);
+       return res;
+
+err:
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+       return res;
+}
+
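+/*
+ * Untag the socket referenced by sock_fd. Input, per the sscanf() below:
+ * "<cmd> <sock_fd>"; fails with -EINVAL if the socket was not tagged.
+ * Drops the file reference taken at tag time as well as the one from this
+ * lookup.
+ */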
+static int ctrl_cmd_untag(const char *input)
+{
+       char cmd;
+       int sock_fd = 0;
+       struct socket *el_socket;
+       int res, argc;
+       struct sock_tag *sock_tag_entry;
+       struct tag_ref *tag_ref_entry;
+       struct uid_tag_data *utd_entry;
+       struct proc_qtu_data *pqd_entry;
+
+       argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+       CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+                input, argc, cmd, sock_fd);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+       if (!el_socket) {
+               pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       from_kuid(&init_user_ns, current_fsuid()));
+               goto err;
+       }
+       CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+                input, atomic_long_read(&el_socket->file->f_count),
+                el_socket->sk);
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+       if (!sock_tag_entry) {
+               spin_unlock_bh(&sock_tag_list_lock);
+               res = -EINVAL;
+               goto err_put;
+       }
+       /*
+        * The socket already belongs to the current process
+        * so it can do whatever it wants to it.
+        */
+       rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+       tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+       BUG_ON(!tag_ref_entry);
+       BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       pqd_entry = proc_qtu_data_tree_search(
+               &proc_qtu_data_tree, current->tgid);
+       /*
+        * TODO: remove if, and start failing.
+        * At first, we want to catch user-space code that does not
+        * open /dev/xt_qtaguid.
+        */
+       if (IS_ERR_OR_NULL(pqd_entry))
+               pr_warn_once("qtaguid: %s(): "
+                            "User space forgot to open /dev/xt_qtaguid? "
+                            "pid=%u tgid=%u uid=%u\n", __func__,
+                            current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+       else
+               list_del(&sock_tag_entry->list);
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       /*
+        * We don't free tag_ref from the utd_entry here,
+        * only during a cmd_delete().
+        */
+       tag_ref_entry->num_sock_tags--;
+       spin_unlock_bh(&sock_tag_list_lock);
+       /*
+        * Release the sock_fd that was grabbed at tag time,
+        * and once more for the sockfd_lookup() here.
+        */
+       sockfd_put(sock_tag_entry->socket);
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+                input, sock_tag_entry,
+                atomic_long_read(&el_socket->file->f_count) - 1);
+       sockfd_put(el_socket);
+
+       kfree(sock_tag_entry);
+       atomic64_inc(&qtu_events.sockets_untagged);
+
+       return 0;
+
+err_put:
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+                input, atomic_long_read(&el_socket->file->f_count) - 1);
+       /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+       sockfd_put(el_socket);
+       return res;
+
+err:
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+       return res;
+}
+
+static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
+{
+       char cmd;
+       ssize_t res;
+
+       CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+                input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+       cmd = input[0];
+       /* Dispatch to the per-command handlers; each parses its own params. */
+       switch (cmd) {
+       case 'd':
+               res = ctrl_cmd_delete(input);
+               break;
+
+       case 's':
+               res = ctrl_cmd_counter_set(input);
+               break;
+
+       case 't':
+               res = ctrl_cmd_tag(input);
+               break;
+
+       case 'u':
+               res = ctrl_cmd_untag(input);
+               break;
+
+       default:
+               res = -EINVAL;
+               goto err;
+       }
+       if (!res)
+               res = count;
+err:
+       CT_DEBUG("qtaguid: ctrl(%s): res=%zd\n", input, res);
+       return res;
+}
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static ssize_t qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+                                  size_t count, loff_t *offp)
+{
+       char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+       if (unlikely(module_passive))
+               return count;
+
+       if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+               return -EINVAL;
+
+       if (copy_from_user(input_buf, buffer, count))
+               return -EFAULT;
+
+       input_buf[count] = '\0';
+       return qtaguid_ctrl_parse(input_buf, count);
+}
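+
+/*
+ * Example usage (illustrative): user space drives tagging by writing short
+ * command strings to the ctrl proc file.  E.g. writing "u 4" untags the
+ * socket referenced by file descriptor 4 of the calling process (see
+ * ctrl_cmd_untag() above).  Writes of MAX_QTAGUID_CTRL_INPUT_LEN bytes or
+ * more are rejected with -EINVAL.
+ */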
+
+struct proc_print_info {
+       struct iface_stat *iface_entry;
+       int item_index;
+       tag_t tag; /* tag found by reading to tag_pos */
+       off_t tag_pos;
+       int tag_item_index;
+};
+
+static void pp_stats_header(struct seq_file *m)
+{
+       seq_puts(m,
+                "idx iface acct_tag_hex uid_tag_int cnt_set "
+                "rx_bytes rx_packets "
+                "tx_bytes tx_packets "
+                "rx_tcp_bytes rx_tcp_packets "
+                "rx_udp_bytes rx_udp_packets "
+                "rx_other_bytes rx_other_packets "
+                "tx_tcp_bytes tx_tcp_packets "
+                "tx_udp_bytes tx_udp_packets "
+                "tx_other_bytes tx_other_packets\n");
+}
+
+static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry,
+                        int cnt_set)
+{
+       struct data_counters *cnts;
+       tag_t tag = ts_entry->tn.tag;
+       uid_t stat_uid = get_uid_from_tag(tag);
+       struct proc_print_info *ppi = m->private;
+       /* Detailed tags are not available to everybody */
+       if (!can_read_other_uid_stats(make_kuid(&init_user_ns, stat_uid))) {
+               CT_DEBUG("qtaguid: stats line: "
+                        "%s 0x%llx %u: insufficient priv "
+                        "from pid=%u tgid=%u uid=%u stats.gid=%u\n",
+                        ppi->iface_entry->ifname,
+                        get_atag_from_tag(tag), stat_uid,
+                        current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()),
+                        from_kgid(&init_user_ns, xt_qtaguid_stats_file->gid));
+               return 0;
+       }
+       ppi->item_index++;
+       cnts = &ts_entry->counters;
+       seq_printf(m, "%d %s 0x%llx %u %u "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu\n",
+               ppi->item_index,
+               ppi->iface_entry->ifname,
+               get_atag_from_tag(tag),
+               stat_uid,
+               cnt_set,
+               dc_sum_bytes(cnts, cnt_set, IFS_RX),
+               dc_sum_packets(cnts, cnt_set, IFS_RX),
+               dc_sum_bytes(cnts, cnt_set, IFS_TX),
+               dc_sum_packets(cnts, cnt_set, IFS_TX),
+               cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+               cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+               cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+       return seq_has_overflowed(m) ? -ENOSPC : 1;
+}
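+
+/*
+ * Illustrative stats line (values made up), in the column order emitted by
+ * pp_stats_header():
+ *   2 wlan0 0x200000000 10003 0 1000 10 500 5 800 8 200 2 0 0 400 4 100 1 0 0
+ * i.e. rx_bytes (1000) is the sum of the rx tcp/udp/other byte counters.
+ */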
+
+static bool pp_sets(struct seq_file *m, struct tag_stat *ts_entry)
+{
+       int ret;
+       int counter_set;
+       for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+            counter_set++) {
+               ret = pp_stats_line(m, ts_entry, counter_set);
+               if (ret < 0)
+                       return false;
+       }
+       return true;
+}
+
+static int qtaguid_stats_proc_iface_stat_ptr_valid(struct iface_stat *ptr)
+{
+       struct iface_stat *iface_entry;
+
+       if (!ptr)
+               return false;
+
+       list_for_each_entry(iface_entry, &iface_stat_list, list)
+               if (iface_entry == ptr)
+                       return true;
+       return false;
+}
+
+static void qtaguid_stats_proc_next_iface_entry(struct proc_print_info *ppi)
+{
+       spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+       list_for_each_entry_continue(ppi->iface_entry, &iface_stat_list, list) {
+               spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+               return;
+       }
+       ppi->iface_entry = NULL;
+}
+
+static void *qtaguid_stats_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct proc_print_info *ppi = m->private;
+       struct tag_stat *ts_entry;
+       struct rb_node *node;
+
+       if (!v) {
+               pr_err("qtaguid: %s(): unexpected v: NULL\n", __func__);
+               return NULL;
+       }
+
+       (*pos)++;
+
+       if (!ppi->iface_entry || unlikely(module_passive))
+               return NULL;
+
+       if (v == SEQ_START_TOKEN)
+               node = rb_first(&ppi->iface_entry->tag_stat_tree);
+       else
+               node = rb_next(&((struct tag_stat *)v)->tn.node);
+
+       while (!node) {
+               qtaguid_stats_proc_next_iface_entry(ppi);
+               if (!ppi->iface_entry)
+                       return NULL;
+               node = rb_first(&ppi->iface_entry->tag_stat_tree);
+       }
+
+       ts_entry = rb_entry(node, struct tag_stat, tn.node);
+       ppi->tag = ts_entry->tn.tag;
+       ppi->tag_pos = *pos;
+       ppi->tag_item_index = ppi->item_index;
+       return ts_entry;
+}
+
+static void *qtaguid_stats_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_print_info *ppi = m->private;
+       struct tag_stat *ts_entry = NULL;
+
+       spin_lock_bh(&iface_stat_list_lock);
+
+       if (*pos == 0) {
+               ppi->item_index = 1;
+               ppi->tag_pos = 0;
+               if (list_empty(&iface_stat_list)) {
+                       ppi->iface_entry = NULL;
+               } else {
+                       ppi->iface_entry = list_first_entry(&iface_stat_list,
+                                                           struct iface_stat,
+                                                           list);
+                       spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+               }
+               return SEQ_START_TOKEN;
+       }
+       if (!qtaguid_stats_proc_iface_stat_ptr_valid(ppi->iface_entry)) {
+               if (ppi->iface_entry) {
+                       pr_err("qtaguid: %s(): iface_entry %p not found\n",
+                              __func__, ppi->iface_entry);
+                       ppi->iface_entry = NULL;
+               }
+               return NULL;
+       }
+
+       spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+
+       if (!ppi->tag_pos) {
+               /* seq_read skipped first next call */
+               ts_entry = SEQ_START_TOKEN;
+       } else {
+               ts_entry = tag_stat_tree_search(
+                               &ppi->iface_entry->tag_stat_tree, ppi->tag);
+               if (!ts_entry) {
+                       pr_info("qtaguid: %s(): tag_stat.tag 0x%llx not found. Abort.\n",
+                               __func__, ppi->tag);
+                       return NULL;
+               }
+       }
+
+       if (*pos == ppi->tag_pos) { /* normal resume */
+               ppi->item_index = ppi->tag_item_index;
+       } else {
+               /* seq_read skipped a next call */
+               *pos = ppi->tag_pos;
+               ts_entry = qtaguid_stats_proc_next(m, ts_entry, pos);
+       }
+
+       return ts_entry;
+}
+
+static void qtaguid_stats_proc_stop(struct seq_file *m, void *v)
+{
+       struct proc_print_info *ppi = m->private;
+       if (ppi->iface_entry)
+               spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Procfs reader to get all tag stats, using style "1)" as described in
+ * fs/proc/generic.c.
+ * Groups tx/rx bytes across all protocols.
+ */
+static int qtaguid_stats_proc_show(struct seq_file *m, void *v)
+{
+       struct tag_stat *ts_entry = v;
+
+       if (v == SEQ_START_TOKEN)
+               pp_stats_header(m);
+       else
+               pp_sets(m, ts_entry);
+
+       return 0;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+       struct uid_tag_data *utd_entry;
+       struct proc_qtu_data  *pqd_entry;
+       struct proc_qtu_data  *new_pqd_entry;
+       int res;
+       bool utd_entry_found;
+
+       if (unlikely(qtu_proc_handling_passive))
+               return 0;
+
+       DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+       spin_lock_bh(&uid_tag_data_tree_lock);
+
+       /* Look for existing uid data, or alloc one. */
+       utd_entry = get_uid_data(from_kuid(&init_user_ns, current_fsuid()), &utd_entry_found);
+       if (IS_ERR_OR_NULL(utd_entry)) {
+               res = PTR_ERR(utd_entry);
+               goto err_unlock;
+       }
+
+       /* Look for existing PID based proc_data */
+       pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+                                             current->tgid);
+       if (pqd_entry) {
+               pr_err("qtaguid: qtudev_open(): %u/%u %u "
+                      "%s already opened\n",
+                      current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()),
+                      QTU_DEV_NAME);
+               res = -EBUSY;
+               goto err_unlock_free_utd;
+       }
+
+       new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+       if (!new_pqd_entry) {
+               pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+                      "proc data alloc failed\n",
+                      current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+               res = -ENOMEM;
+               goto err_unlock_free_utd;
+       }
+       new_pqd_entry->pid = current->tgid;
+       INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+       new_pqd_entry->parent_tag_data = utd_entry;
+       utd_entry->num_pqd++;
+
+       proc_qtu_data_tree_insert(new_pqd_entry,
+                                 &proc_qtu_data_tree);
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+                from_kuid(&init_user_ns, current_fsuid()), new_pqd_entry);
+       file->private_data = new_pqd_entry;
+       return 0;
+
+err_unlock_free_utd:
+       if (!utd_entry_found) {
+               rb_erase(&utd_entry->node, &uid_tag_data_tree);
+               kfree(utd_entry);
+       }
+err_unlock:
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+       struct proc_qtu_data  *pqd_entry = file->private_data;
+       struct uid_tag_data  *utd_entry = pqd_entry->parent_tag_data;
+       struct sock_tag *st_entry;
+       struct rb_root st_to_free_tree = RB_ROOT;
+       struct list_head *entry, *next;
+       struct tag_ref *tr;
+
+       if (unlikely(qtu_proc_handling_passive))
+               return 0;
+
+       /*
+        * Do not trust current->pid; it might just be a kworker cleaning
+        * up after a dead proc.
+        */
+       DR_DEBUG("qtaguid: qtudev_release(): "
+                "pid=%u tgid=%u uid=%u "
+                "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+                current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+                pqd_entry, pqd_entry->pid, utd_entry,
+                utd_entry->num_active_tags);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+
+       list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+               st_entry = list_entry(entry, struct sock_tag, list);
+               DR_DEBUG("qtaguid: %s(): "
+                        "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+                        __func__,
+                        st_entry, st_entry->sk,
+                        current->pid, current->tgid,
+                        pqd_entry->parent_tag_data->uid);
+
+               utd_entry = uid_tag_data_tree_search(
+                       &uid_tag_data_tree,
+                       get_uid_from_tag(st_entry->tag));
+               BUG_ON(IS_ERR_OR_NULL(utd_entry));
+               DR_DEBUG("qtaguid: %s(): "
+                        "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+                        st_entry->tag, utd_entry);
+               tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+                                        st_entry->tag);
+               BUG_ON(!tr);
+               BUG_ON(tr->num_sock_tags <= 0);
+               tr->num_sock_tags--;
+               free_tag_ref_from_utd_entry(tr, utd_entry);
+
+               rb_erase(&st_entry->sock_node, &sock_tag_tree);
+               list_del(&st_entry->list);
+               /* Can't sockfd_put() within spinlock, do it later. */
+               sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+               /*
+                * Try to free the utd_entry if no other proc_qtu_data is
+                * using it (num_pqd is 0) and it doesn't have active tags
+                * (num_active_tags is 0).
+                */
+               put_utd_entry(utd_entry);
+       }
+
+       rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+       BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+       pqd_entry->parent_tag_data->num_pqd--;
+       put_utd_entry(pqd_entry->parent_tag_data);
+       kfree(pqd_entry);
+       file->private_data = NULL;
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+
+       sock_tag_tree_erase(&st_to_free_tree);
+
+       prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+                          current->pid, current->tgid);
+       return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+       .owner = THIS_MODULE,
+       .open = qtudev_open,
+       .release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = QTU_DEV_NAME,
+       .fops = &qtudev_fops,
+       /* Sadly, it doesn't allow defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+static const struct seq_operations proc_qtaguid_ctrl_seqops = {
+       .start = qtaguid_ctrl_proc_start,
+       .next = qtaguid_ctrl_proc_next,
+       .stop = qtaguid_ctrl_proc_stop,
+       .show = qtaguid_ctrl_proc_show,
+};
+
+static int proc_qtaguid_ctrl_open(struct inode *inode, struct file *file)
+{
+       return seq_open_private(file, &proc_qtaguid_ctrl_seqops,
+                               sizeof(struct proc_ctrl_print_info));
+}
+
+static const struct file_operations proc_qtaguid_ctrl_fops = {
+       .open           = proc_qtaguid_ctrl_open,
+       .read           = seq_read,
+       .write          = qtaguid_ctrl_proc_write,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+static const struct seq_operations proc_qtaguid_stats_seqops = {
+       .start = qtaguid_stats_proc_start,
+       .next = qtaguid_stats_proc_next,
+       .stop = qtaguid_stats_proc_stop,
+       .show = qtaguid_stats_proc_show,
+};
+
+static int proc_qtaguid_stats_open(struct inode *inode, struct file *file)
+{
+       return seq_open_private(file, &proc_qtaguid_stats_seqops,
+                               sizeof(struct proc_print_info));
+}
+
+static const struct file_operations proc_qtaguid_stats_fops = {
+       .open           = proc_qtaguid_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+       int ret;
+       *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+       if (!*res_procdir) {
+               pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+               ret = -ENOMEM;
+               goto no_dir;
+       }
+
+       xt_qtaguid_ctrl_file = proc_create_data("ctrl", proc_ctrl_perms,
+                                               *res_procdir,
+                                               &proc_qtaguid_ctrl_fops,
+                                               NULL);
+       if (!xt_qtaguid_ctrl_file) {
+               pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+                       "file\n");
+               ret = -ENOMEM;
+               goto no_ctrl_entry;
+       }
+
+       xt_qtaguid_stats_file = proc_create_data("stats", proc_stats_perms,
+                                                *res_procdir,
+                                                &proc_qtaguid_stats_fops,
+                                                NULL);
+       if (!xt_qtaguid_stats_file) {
+               pr_err("qtaguid: failed to create xt_qtaguid/stats "
+                       "file\n");
+               ret = -ENOMEM;
+               goto no_stats_entry;
+       }
+       /*
+        * TODO: add support for counter hacking
+        * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+        */
+       return 0;
+
+no_stats_entry:
+       remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+       remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+       return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+       /*
+        * This module masquerades as the "owner" module so that iptables
+        * tools can deal with it.
+        */
+       .name       = "owner",
+       .revision   = 1,
+       .family     = NFPROTO_UNSPEC,
+       .match      = qtaguid_mt,
+       .matchsize  = sizeof(struct xt_qtaguid_match_info),
+       .me         = THIS_MODULE,
+};
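+
+/*
+ * Illustrative rule, assuming the stock iptables "owner" userspace
+ * extension is used against this match:
+ *   iptables -A OUTPUT -m owner --uid-owner 10003 -j REJECT
+ */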
+
+static int __init qtaguid_mt_init(void)
+{
+       if (qtaguid_proc_register(&xt_qtaguid_procdir)
+           || iface_stat_init(xt_qtaguid_procdir)
+           || xt_register_match(&qtaguid_mt_reg)
+           || misc_register(&qtu_device))
+               return -1;
+       return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644 (file)
index 0000000..6dc14a9
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do {                           \
+               if (unlikely(qtaguid_debug_mask & (mask)))  \
+                       pr_debug(__VA_ARGS__);              \
+       } while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a:  {acct_tag=1, uid_tag=10003}
+ * b:  {acct_tag=2, uid_tag=10003}
+ * c:  {acct_tag=3, uid_tag=10003}
+ * d:  {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t;  /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+       return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+       return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+       return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+       return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+       return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+       return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+       return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+       return (uint64_t)value << 32;
+}
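+
+/*
+ * Worked example: make_atag_from_value(2) == 0x200000000; combining it with
+ * uid 10003 (0x2713) via combine_atag_with_uid() yields 0x200002713.
+ * get_uid_from_tag(0x200002713) == 10003 and
+ * get_atag_from_tag(0x200002713) == 0x200000000.
+ */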
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+       IFS_TX,
+       IFS_RX,
+       IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+       IFS_TCP,
+       IFS_UDP,
+       IFS_PROTO_OTHER,
+       IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+       uint64_t bytes;
+       uint64_t packets;
+};
+
+struct data_counters {
+       struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+                                   int set,
+                                   enum ifs_tx_rx direction)
+{
+       return counters->bpc[set][direction][IFS_TCP].bytes
+               + counters->bpc[set][direction][IFS_UDP].bytes
+               + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+                                     int set,
+                                     enum ifs_tx_rx direction)
+{
+       return counters->bpc[set][direction][IFS_TCP].packets
+               + counters->bpc[set][direction][IFS_UDP].packets
+               + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
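+
+/*
+ * Illustrative accounting of one received 1500 byte TCP packet against
+ * counter set 0 (the real accounting lives in xt_qtaguid.c):
+ *   cnts->bpc[0][IFS_RX][IFS_TCP].bytes   += 1500;
+ *   cnts->bpc[0][IFS_RX][IFS_TCP].packets += 1;
+ * dc_sum_bytes(cnts, 0, IFS_RX) then includes those 1500 bytes.
+ */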
+
+
+/* Generic tag-based node used as a base for rb_tree ops */
+struct tag_node {
+       struct rb_node node;
+       tag_t tag;
+};
+
+struct tag_stat {
+       struct tag_node tn;
+       struct data_counters counters;
+       /*
+        * If this tag is acct_tag based, we need to count against the
+        * matching parent uid_tag.
+        */
+       struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+       struct list_head list;  /* in iface_stat_list */
+       char *ifname;
+       bool active;
+       /* net_dev is only valid for active iface_stat */
+       struct net_device *net_dev;
+
+       struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+       struct data_counters totals_via_skb;
+       /*
+        * We keep the last_known, because some devices reset their counters
+        * just before NETDEV_UP, while some will reset just before
+        * NETDEV_REGISTER (which is more normal).
+        * So, if the device didn't do a NETDEV_UNREGISTER and we see
+        * its current dev stats smaller than what was previously known, we
+        * assume an UNREGISTER happened and just use the last_known values.
+        */
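+       /*
+        * Worked example: if last_known rx bytes were 1,000,000 and the
+        * device later reports only 4,096, a reset is assumed and the
+        * last_known 1,000,000 is used instead of the smaller reading.
+        */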
+       struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+       /* last_known is usable when last_known_valid is true */
+       bool last_known_valid;
+
+       struct proc_dir_entry *proc_ptr;
+
+       struct rb_root tag_stat_tree;
+       spinlock_t tag_stat_list_lock;
+};
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+       struct work_struct iface_work;
+       struct iface_stat *iface_entry;
+};
+
+/*
+ * Tracks the tag that this socket is transferring data for, which is not
+ * necessarily the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+       struct rb_node sock_node;
+       struct sock *sk;  /* Only used as a number, never dereferenced */
+       /* The socket is needed for sockfd_put() */
+       struct socket *socket;
+       /* Used to associate with a given pid */
+       struct list_head list;   /* in proc_qtu_data.sock_tag_list */
+       pid_t pid;
+
+       tag_t tag;
+};
+
+struct qtaguid_event_counts {
+       /* Various successful events */
+       atomic64_t sockets_tagged;
+       atomic64_t sockets_untagged;
+       atomic64_t counter_set_changes;
+       atomic64_t delete_cmds;
+       atomic64_t iface_events;  /* Number of NETDEV_* events handled */
+
+       atomic64_t match_calls;   /* Number of times iptables called mt */
+       /* Number of times iptables called mt from pre or post routing hooks */
+       atomic64_t match_calls_prepost;
+       /*
+        * match_found_sk_*: numbers related to the netfilter matching
+        * function finding a sock for the sk_buff.
+        * Total skbs processed is sum(match_found*).
+        */
+       atomic64_t match_found_sk;   /* An sk was already in the sk_buff. */
+       /* The connection tracker had or didn't have the sk. */
+       atomic64_t match_found_sk_in_ct;
+       atomic64_t match_found_no_sk_in_ct;
+       /*
+        * No sk could be found. No apparent owner. Could happen with
+        * unsolicited traffic.
+        */
+                * At first, we want to catch user-space code that does not
+                * open /dev/xt_qtaguid.
+        * The file ptr in the sk_socket wasn't there.
+        * This might happen for traffic while the socket is being closed.
+        */
+       atomic64_t match_no_sk_file;
+};
+
+/* Tracks which counter set (active_set) is in use for the given tag. */
+struct tag_counter_set {
+       struct tag_node tn;
+       int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources are counted to prevent further rogue allocations;
+ * others need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+       struct rb_node node;
+       uid_t uid;
+
+       /*
+        * For the uid, how many accounting tags have been set.
+        */
+       int num_active_tags;
+       /* Track the number of proc_qtu_data that reference it */
+       int num_pqd;
+       struct rb_root tag_ref_tree;
+       /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+       struct tag_node tn;
+
+       /*
+        * This tracks the number of active sockets that have a tag on them
+        * which matches this tag_ref.tn.tag.
+        * A tag ref can live on after the sockets are untagged.
+        * A tag ref can only be removed during a tag delete command.
+        */
+       int num_sock_tags;
+};
+
+struct proc_qtu_data {
+       struct rb_node node;
+       pid_t pid;
+
+       struct uid_tag_data *parent_tag_data;
+
+       /* Tracks the sock_tags that need freeing upon this proc's death */
+       struct list_head sock_tag_list;
+       /* No spinlock_t sock_tag_list_lock; use the global one. */
+};
+
+/*----------------------------------------------*/
+#endif  /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644 (file)
index 0000000..f6a00a3
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h provides empty static inline funcs if the
+ * needed debug flags are not defined.
+ * Funcs that fail to allocate memory will panic, as there is no point in
+ * hobbling along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+       if (IS_ERR_OR_NULL(ptr)) {
+               pr_err("qtaguid: kmalloc failed\n");
+               BUG();
+       }
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+       char *res;
+
+       if (!tag)
+               res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+       else
+               res = kasprintf(GFP_ATOMIC,
+                               "tag_t@%p{tag=0x%llx, uid=%u}",
+                               tag, *tag, get_uid_from_tag(*tag));
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+       char *res;
+
+       if (!dc)
+               res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+       else if (showValues)
+               res = kasprintf(
+                       GFP_ATOMIC, "data_counters@%p{"
+                       "set0{"
+                       "rx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}, "
+                       "tx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}}, "
+                       "set1{"
+                       "rx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}, "
+                       "tx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}}}",
+                       dc,
+                       dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+                       dc->bpc[0][IFS_RX][IFS_TCP].packets,
+                       dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+                       dc->bpc[0][IFS_RX][IFS_UDP].packets,
+                       dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+                       dc->bpc[0][IFS_TX][IFS_TCP].packets,
+                       dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+                       dc->bpc[0][IFS_TX][IFS_UDP].packets,
+                       dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+                       dc->bpc[1][IFS_RX][IFS_TCP].packets,
+                       dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+                       dc->bpc[1][IFS_RX][IFS_UDP].packets,
+                       dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+                       dc->bpc[1][IFS_TX][IFS_TCP].packets,
+                       dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+                       dc->bpc[1][IFS_TX][IFS_UDP].packets,
+                       dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+       else
+               res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+       char *tag_str;
+       char *res;
+
+       if (!tn) {
+               res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tag_str = pp_tag_t(&tn->tag);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_node@%p{tag=%s}",
+                       tn, tag_str);
+       _bug_on_err_or_null(res);
+       kfree(tag_str);
+       return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+       char *tn_str;
+       char *res;
+
+       if (!tr) {
+               res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tn_str = pp_tag_node(&tr->tn);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_ref@%p{%s, num_sock_tags=%d}",
+                       tr, tn_str, tr->num_sock_tags);
+       _bug_on_err_or_null(res);
+       kfree(tn_str);
+       return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+       char *tn_str;
+       char *counters_str;
+       char *parent_counters_str;
+       char *res;
+
+       if (!ts) {
+               res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tn_str = pp_tag_node(&ts->tn);
+       counters_str = pp_data_counters(&ts->counters, true);
+       parent_counters_str = pp_data_counters(ts->parent_counters, false);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+                       ts, tn_str, counters_str, parent_counters_str);
+       _bug_on_err_or_null(res);
+       kfree(tn_str);
+       kfree(counters_str);
+       kfree(parent_counters_str);
+       return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+       char *res;
+       if (!is) {
+               res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+       } else {
+               struct data_counters *cnts = &is->totals_via_skb;
+               res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+                               "list=list_head{...}, "
+                               "ifname=%s, "
+                               "total_dev={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "total_skb={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "last_known_valid=%d, "
+                               "last_known={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "active=%d, "
+                               "net_dev=%p, "
+                               "proc_ptr=%p, "
+                               "tag_stat_tree=rb_root{...}}",
+                               is,
+                               is->ifname,
+                               is->totals_via_dev[IFS_RX].bytes,
+                               is->totals_via_dev[IFS_RX].packets,
+                               is->totals_via_dev[IFS_TX].bytes,
+                               is->totals_via_dev[IFS_TX].packets,
+                               dc_sum_bytes(cnts, 0, IFS_RX),
+                               dc_sum_packets(cnts, 0, IFS_RX),
+                               dc_sum_bytes(cnts, 0, IFS_TX),
+                               dc_sum_packets(cnts, 0, IFS_TX),
+                               is->last_known_valid,
+                               is->last_known[IFS_RX].bytes,
+                               is->last_known[IFS_RX].packets,
+                               is->last_known[IFS_TX].bytes,
+                               is->last_known[IFS_TX].packets,
+                               is->active,
+                               is->net_dev,
+                               is->proc_ptr);
+       }
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+       char *tag_str;
+       char *res;
+
+       if (!st) {
+               res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tag_str = pp_tag_t(&st->tag);
+       res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+                       "sock_node=rb_node{...}, "
+                       "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+                       "pid=%u, tag=%s}",
+                       st, st->sk, st->socket, atomic_long_read(
+                               &st->socket->file->f_count),
+                       st->pid, tag_str);
+       _bug_on_err_or_null(res);
+       kfree(tag_str);
+       return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+       char *res;
+
+       if (!utd)
+               res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+       else
+               res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+                               "uid=%u, num_active_acct_tags=%d, "
+                               "num_pqd=%d, "
+                               "tag_node_tree=rb_root{...}, "
+                               "proc_qtu_data_tree=rb_root{...}}",
+                               utd, utd->uid,
+                               utd->num_active_tags, utd->num_pqd);
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+       char *parent_tag_data_str;
+       char *res;
+
+       if (!pqd) {
+               res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+       res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+                       "node=rb_node{...}, pid=%u, "
+                       "parent_tag_data=%s, "
+                       "sock_tag_list=list_head{...}}",
+                       pqd, pqd->pid, parent_tag_data_str
+               );
+       _bug_on_err_or_null(res);
+       kfree(parent_tag_data_str);
+       return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree)
+{
+       struct rb_node *node;
+       struct sock_tag *sock_tag_entry;
+       char *str;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(sock_tag_tree)) {
+               str = "sock_tag_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "sock_tag_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(sock_tag_tree);
+            node;
+            node = rb_next(node)) {
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               str = pp_sock_tag(sock_tag_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list)
+{
+       struct sock_tag *sock_tag_entry;
+       char *str;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (list_empty(sock_tag_list)) {
+               str = "sock_tag_list=list_head{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "sock_tag_list=list_head{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+               str = pp_sock_tag(sock_tag_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct proc_qtu_data *proc_qtu_data_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+               str = "proc_qtu_data_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "proc_qtu_data_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(proc_qtu_data_tree);
+            node;
+            node = rb_next(node)) {
+               proc_qtu_data_entry = rb_entry(node,
+                                              struct proc_qtu_data,
+                                              node);
+               str = pp_proc_qtu_data(proc_qtu_data_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+               indent_level++;
+               prdebug_sock_tag_list(indent_level,
+                                     &proc_qtu_data_entry->sock_tag_list);
+               indent_level--;
+
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct tag_ref *tag_ref_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(tag_ref_tree)) {
+               str = "tag_ref_tree{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "tag_ref_tree{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(tag_ref_tree);
+            node;
+            node = rb_next(node)) {
+               tag_ref_entry = rb_entry(node,
+                                        struct tag_ref,
+                                        tn.node);
+               str = pp_tag_ref(tag_ref_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct uid_tag_data *uid_tag_data_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+               str = "uid_tag_data_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "uid_tag_data_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(uid_tag_data_tree);
+            node;
+            node = rb_next(node)) {
+               uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+                                             node);
+               str = pp_uid_tag_data(uid_tag_data_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+               if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+                       indent_level++;
+                       prdebug_tag_ref_tree(indent_level,
+                                            &uid_tag_data_entry->tag_ref_tree);
+                       indent_level--;
+               }
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+                                 struct rb_root *tag_stat_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct tag_stat *ts_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(tag_stat_tree)) {
+               str = "tag_stat_tree{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "tag_stat_tree{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(tag_stat_tree);
+            node;
+            node = rb_next(node)) {
+               ts_entry = rb_entry(node, struct tag_stat, tn.node);
+               str = pp_tag_stat(ts_entry);
+               pr_debug("%*d: %s\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list)
+{
+       char *str;
+       struct iface_stat *iface_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (list_empty(iface_stat_list)) {
+               str = "iface_stat_list=list_head{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "iface_stat_list=list_head{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       list_for_each_entry(iface_entry, iface_stat_list, list) {
+               str = pp_iface_stat(iface_entry);
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               kfree(str);
+
+               spin_lock_bh(&iface_entry->tag_stat_list_lock);
+               if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+                       indent_level++;
+                       prdebug_tag_stat_tree(indent_level,
+                                             &iface_entry->tag_stat_tree);
+                       indent_level--;
+               }
+               spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif  /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+       "netdev_unknown",
+       "NETDEV_UP",
+       "NETDEV_DOWN",
+       "NETDEV_REBOOT",
+       "NETDEV_CHANGE",
+       "NETDEV_REGISTER",
+       "NETDEV_UNREGISTER",
+       "NETDEV_CHANGEMTU",
+       "NETDEV_CHANGEADDR",
+       "NETDEV_GOING_DOWN",
+       "NETDEV_CHANGENAME",
+       "NETDEV_FEAT_CHANGE",
+       "NETDEV_BONDING_FAILOVER",
+       "NETDEV_PRE_UP",
+       "NETDEV_PRE_TYPE_CHANGE",
+       "NETDEV_POST_TYPE_CHANGE",
+       "NETDEV_POST_INIT",
+       "NETDEV_UNREGISTER_BATCH",
+       "NETDEV_RELEASE",
+       "NETDEV_NOTIFY_PEERS",
+       "NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+       if (netdev_event < 0
+           || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+               return "bad event num";
+       return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644 (file)
index 0000000..b63871a
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+                          struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+       return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+       return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+       return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+       return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+       return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+       return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+       return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+       return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+       return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+                          struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif  /* ifndef __XT_QTAGUID_PRINT_H__ */
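
The header above follows the usual debug-stub pattern: with DDEBUG the pp_* helpers return kmalloc'ed strings and the prdebug_* helpers walk the data structures, while without DDEBUG everything collapses into empty static inlines, so call sites need no #ifdefs. A minimal sketch of such a call site (the function name is hypothetical):

#include <linux/printk.h>
#include <linux/slab.h>
#include "xt_qtaguid_print.h"

static void example_dump_sock_tag(struct sock_tag *st)
{
	char *str = pp_sock_tag(st);	/* formatted string with DDEBUG, NULL otherwise */

	pr_debug("qtaguid: %s\n", str ?: "(debug off)");
	kfree(str);			/* kfree(NULL) is a no-op, so this is safe either way */
}
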
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644 (file)
index 0000000..834594a
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ *     netfilter module to enforce network quotas
+ *     Sam Johnston <samj@samj.net>
+ *
+ *     This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License; either
+ *     version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* For compatibility, these definitions are copied from the
+ * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
+#define ULOG_MAC_LEN   80
+#define ULOG_PREFIX_LEN        32
+
+/* Format of the ULOG packets passed through netlink */
+typedef struct ulog_packet_msg {
+       unsigned long mark;
+       long timestamp_sec;
+       long timestamp_usec;
+       unsigned int hook;
+       char indev_name[IFNAMSIZ];
+       char outdev_name[IFNAMSIZ];
+       size_t data_len;
+       char prefix[ULOG_PREFIX_LEN];
+       unsigned char mac_len;
+       unsigned char mac[ULOG_MAC_LEN];
+       unsigned char payload[0];
+} ulog_packet_msg_t;
+#endif
+
+/**
+ * struct xt_quota_counter - kernel-side state of one quota counter
+ * @lock:      lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+       u_int64_t quota;
+       spinlock_t lock;
+       struct list_head list;
+       atomic_t ref;
+       char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+       struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D, from ipt_ULOG.c */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+                "Event number for NETLINK_NFLOG message. 0 disables log."
+                "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static kuid_t quota_list_uid = KUIDT_INIT(0);
+static kgid_t quota_list_gid = KGIDT_INIT(0);
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+                      const struct sk_buff *skb,
+                      const struct net_device *in,
+                      const struct net_device *out,
+                      const char *prefix)
+{
+       ulog_packet_msg_t *pm;
+       struct sk_buff *log_skb;
+       size_t size;
+       struct nlmsghdr *nlh;
+
+       if (!qlog_nl_event)
+               return;
+
+       size = NLMSG_SPACE(sizeof(*pm));
+       size = max(size, (size_t)NLMSG_GOODSIZE);
+       log_skb = alloc_skb(size, GFP_ATOMIC);
+       if (!log_skb) {
+               pr_err("xt_quota2: cannot alloc skb for logging\n");
+               return;
+       }
+
+       nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+                       sizeof(*pm), 0);
+       if (!nlh) {
+               pr_err("xt_quota2: nlmsg_put failed\n");
+               kfree_skb(log_skb);
+               return;
+       }
+       pm = nlmsg_data(nlh);
+       if (skb->tstamp.tv64 == 0)
+               __net_timestamp((struct sk_buff *)skb);
+       pm->data_len = 0;
+       pm->hook = hooknum;
+       if (prefix != NULL)
+               strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+       else
+               *(pm->prefix) = '\0';
+       if (in)
+               strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+       else
+               pm->indev_name[0] = '\0';
+
+       if (out)
+               strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+       else
+               pm->outdev_name[0] = '\0';
+
+       NETLINK_CB(log_skb).dst_group = 1;
+       pr_debug("throwing 1 packets to netlink group 1\n");
+       netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+                      const struct sk_buff *skb,
+                      const struct net_device *in,
+                      const struct net_device *out,
+                      const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+                          size_t size, loff_t *ppos)
+{
+       struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       spin_lock_bh(&e->lock);
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+       spin_unlock_bh(&e->lock);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+                            size_t size, loff_t *ppos)
+{
+       struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+       char buf[sizeof("18446744073709551616")];
+
+       if (size > sizeof(buf))
+               size = sizeof(buf);
+       if (copy_from_user(buf, input, size) != 0)
+               return -EFAULT;
+       buf[sizeof(buf)-1] = '\0';
+
+       spin_lock_bh(&e->lock);
+       e->quota = simple_strtoull(buf, NULL, 0);
+       spin_unlock_bh(&e->lock);
+       return size;
+}
+
+static const struct file_operations q2_counter_fops = {
+       .read           = quota_proc_read,
+       .write          = quota_proc_write,
+       .llseek         = default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+       struct xt_quota_counter *e;
+       unsigned int size;
+
+       /* Do not need all the procfs things for anonymous counters. */
+       size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+       e = kmalloc(size, GFP_KERNEL);
+       if (e == NULL)
+               return NULL;
+
+       e->quota = q->quota;
+       spin_lock_init(&e->lock);
+       if (!anon) {
+               INIT_LIST_HEAD(&e->list);
+               atomic_set(&e->ref, 1);
+               strlcpy(e->name, q->name, sizeof(e->name));
+       }
+       return e;
+}
+
+/**
+ * q2_get_counter - get a ref to an existing counter or create a new one
+ * @q:         quota match info carrying the counter name
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+       struct proc_dir_entry *p;
+       struct xt_quota_counter *e = NULL;
+       struct xt_quota_counter *new_e;
+
+       if (*q->name == '\0')
+               return q2_new_counter(q, true);
+
+       /* No need to hold a lock while getting a new counter */
+       new_e = q2_new_counter(q, false);
+       if (new_e == NULL)
+               goto out;
+
+       spin_lock_bh(&counter_list_lock);
+       list_for_each_entry(e, &counter_list, list)
+               if (strcmp(e->name, q->name) == 0) {
+                       atomic_inc(&e->ref);
+                       spin_unlock_bh(&counter_list_lock);
+                       kfree(new_e);
+                       pr_debug("xt_quota2: old counter name=%s", e->name);
+                       return e;
+               }
+       e = new_e;
+       pr_debug("xt_quota2: new_counter name=%s", e->name);
+       list_add_tail(&e->list, &counter_list);
+       /* An entry with a refcount of 1 cannot be destroyed directly.
+        * This function has not yet returned the new entry, so iptables
+        * holds no reference it could use to destroy it.
+        * Before another rule could destroy it, this function would first
+        * have to be re-invoked and take a new ref on the same named quota.
+        * Nobody will touch e->procfs_entry either.
+        * So it is safe to release the lock here. */
+       spin_unlock_bh(&counter_list_lock);
+
+       /* proc_create_data() is not spin_lock happy */
+       p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+                             proc_xt_quota, &q2_counter_fops, e);
+
+       if (IS_ERR_OR_NULL(p)) {
+               spin_lock_bh(&counter_list_lock);
+               list_del(&e->list);
+               spin_unlock_bh(&counter_list_lock);
+               goto out;
+       }
+       proc_set_user(p, quota_list_uid, quota_list_gid);
+       return e;
+
+ out:
+       kfree(e);
+       return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+       struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+       pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+       if (q->flags & ~XT_QUOTA_MASK)
+               return -EINVAL;
+
+       q->name[sizeof(q->name)-1] = '\0';
+       if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+               printk(KERN_ERR "xt_quota.3: illegal name\n");
+               return -EINVAL;
+       }
+
+       q->master = q2_get_counter(q);
+       if (q->master == NULL) {
+               printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_quota_mtinfo2 *q = par->matchinfo;
+       struct xt_quota_counter *e = q->master;
+
+       if (*q->name == '\0') {
+               kfree(e);
+               return;
+       }
+
+       spin_lock_bh(&counter_list_lock);
+       if (!atomic_dec_and_test(&e->ref)) {
+               spin_unlock_bh(&counter_list_lock);
+               return;
+       }
+
+       list_del(&e->list);
+       remove_proc_entry(e->name, proc_xt_quota);
+       spin_unlock_bh(&counter_list_lock);
+       kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+       struct xt_quota_counter *e = q->master;
+       bool ret = q->flags & XT_QUOTA_INVERT;
+
+       spin_lock_bh(&e->lock);
+       if (q->flags & XT_QUOTA_GROW) {
+               /*
+                * While no_change is pointless in "grow" mode, we will
+                * implement it here simply to have a consistent behavior.
+                */
+               if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+                       e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+               }
+               ret = true;
+       } else {
+               if (e->quota >= skb->len) {
+                       if (!(q->flags & XT_QUOTA_NO_CHANGE))
+                               e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+                       ret = !ret;
+               } else {
+                       /* We are transitioning, log that fact. */
+                       if (e->quota) {
+                               quota2_log(par->hooknum,
+                                          skb,
+                                          par->in,
+                                          par->out,
+                                          q->name);
+                       }
+                       /* we do not allow even small packets from now on */
+                       e->quota = 0;
+               }
+       }
+       spin_unlock_bh(&e->lock);
+       return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+       {
+               .name       = "quota2",
+               .revision   = 3,
+               .family     = NFPROTO_IPV4,
+               .checkentry = quota_mt2_check,
+               .match      = quota_mt2,
+               .destroy    = quota_mt2_destroy,
+               .matchsize  = sizeof(struct xt_quota_mtinfo2),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "quota2",
+               .revision   = 3,
+               .family     = NFPROTO_IPV6,
+               .checkentry = quota_mt2_check,
+               .match      = quota_mt2,
+               .destroy    = quota_mt2_destroy,
+               .matchsize  = sizeof(struct xt_quota_mtinfo2),
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init quota_mt2_init(void)
+{
+       int ret;
+       pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+       if (!nflognl)
+               return -ENOMEM;
+#endif
+
+       proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+       if (proc_xt_quota == NULL)
+               return -EACCES;
+
+       ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+       if (ret < 0)
+               remove_proc_entry("xt_quota", init_net.proc_net);
+       pr_debug("xt_quota2: init() %d", ret);
+       return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+       xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+       remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
index 2ec08f04b816bc863e582fed000a2143d0b688a5..8a2a489b2cd39ad420bfcc6ba2e82e3a9bbcd4c2 100644 (file)
@@ -143,11 +143,12 @@ static bool xt_socket_sk_is_transparent(struct sock *sk)
        }
 }
 
-static struct sock *xt_socket_lookup_slow_v4(struct net *net,
+struct sock *xt_socket_lookup_slow_v4(struct net *net,
                                             const struct sk_buff *skb,
                                             const struct net_device *indev)
 {
        const struct iphdr *iph = ip_hdr(skb);
+       struct sock *sk = skb->sk;
        __be32 uninitialized_var(daddr), uninitialized_var(saddr);
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        u8 uninitialized_var(protocol);
@@ -198,9 +199,16 @@ static struct sock *xt_socket_lookup_slow_v4(struct net *net,
        }
 #endif
 
-       return xt_socket_get_sock_v4(net, protocol, saddr, daddr,
-                                    sport, dport, indev);
+       if (sk)
+               atomic_inc(&sk->sk_refcnt);
+       else
+               sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
+                                          saddr, daddr, sport, dport,
+                                          indev);
+
+       return sk;
 }
+EXPORT_SYMBOL(xt_socket_lookup_slow_v4);
 
 static bool
 socket_match(const struct sk_buff *skb, struct xt_action_param *par,
@@ -232,8 +240,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                    transparent)
                        pskb->mark = sk->sk_mark;
 
-               if (sk != skb->sk)
-                       sock_gen_put(sk);
+               sock_gen_put(sk);
 
                if (wildcard || !transparent)
                        sk = NULL;
@@ -336,10 +343,11 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
        return NULL;
 }
 
-static struct sock *xt_socket_lookup_slow_v6(struct net *net,
+struct sock *xt_socket_lookup_slow_v6(struct net *net,
                                             const struct sk_buff *skb,
                                             const struct net_device *indev)
 {
+       struct sock *sk = skb->sk;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        const struct in6_addr *daddr = NULL, *saddr = NULL;
        struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -373,9 +381,16 @@ static struct sock *xt_socket_lookup_slow_v6(struct net *net,
                return NULL;
        }
 
-       return xt_socket_get_sock_v6(net, tproto, saddr, daddr,
-                                    sport, dport, indev);
+       if (sk)
+               atomic_inc(&sk->sk_refcnt);
+       else
+               sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
+                                          saddr, daddr, sport, dport,
+                                          indev);
+
+       return sk;
 }
+EXPORT_SYMBOL(xt_socket_lookup_slow_v6);
 
 static bool
 socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
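
With the hunks above, xt_socket_lookup_slow_v4/v6 are exported and always return a referenced sock, either skb->sk with its refcount bumped or a freshly looked-up socket, which is why socket_match() now calls sock_gen_put() unconditionally. A minimal sketch of an external caller under the same contract, inside a hypothetical match function with the usual skb/par arguments (this mirrors how the qtaguid module is expected to use the export):

	struct sock *sk = xt_socket_lookup_slow_v4(dev_net(skb->dev), skb, par->in);

	if (sk) {
		/* ... inspect the owning socket, e.g. for per-UID accounting ... */
		sock_gen_put(sk);	/* always drop the reference taken by the lookup */
	}
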
index 598d374f6a35f714db4753efa7ab28d6782419a9..44c3be9d10215956704595ef2a9db849cac12249 100644 (file)
@@ -10,6 +10,11 @@ menuconfig RFKILL
          To compile this driver as a module, choose M here: the
          module will be called rfkill.
 
+config RFKILL_PM
+       bool "Power off on suspend"
+       depends on RFKILL && PM
+       default y
+
 # LED trigger support
 config RFKILL_LEDS
        bool
index cf5b69ab18294bb2fa36a00f2a19dbf02d0f3045..d778d99326df703a29df879bdea95253382a9c7a 100644 (file)
@@ -802,7 +802,7 @@ void rfkill_resume_polling(struct rfkill *rfkill)
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_RFKILL_PM
 static int rfkill_suspend(struct device *dev)
 {
        struct rfkill *rfkill = to_rfkill(dev);
@@ -838,7 +838,9 @@ static struct class rfkill_class = {
        .dev_release    = rfkill_release,
        .dev_groups     = rfkill_dev_groups,
        .dev_uevent     = rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
        .pm             = RFKILL_PM_OPS,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
index 8dde12a11725877104d040bf04251be80781eb32..6e7b86ca2abd6cbc0f72de95513307d980347769 100644 (file)
@@ -69,7 +69,7 @@ module_param(bss_entries_limit, int, 0644);
 MODULE_PARM_DESC(bss_entries_limit,
                  "limit to number of scan BSS entries (per wiphy, default 1000)");
 
-#define IEEE80211_SCAN_RESULT_EXPIRE   (30 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE   (7 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
 {
index 55c96cb8070f1130352ab85f77440dfd497d5677..a3245aea56e116ef10256bc134c1e285bfcd45ac 100644 (file)
@@ -11,7 +11,7 @@ include scripts/Kbuild.include
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
index 79e86613712f22308b8ad4c3442403e09770f2ed..c84080885ad4f9299d979030919f65673cc0ca84 100644 (file)
@@ -294,6 +294,12 @@ $(obj)/%.dtb: $(src)/%.dts FORCE
 
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files
+quiet_cmd_cat = CAT     $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
+
 # Bzip2
 # ---------------------------------------------------------------------------
 
index 07650eeaaf06dd85bb5f3439959ed05c36d8a5ff..6f4c3f5a7ae39b186019c4d4f587fedef50af699 100644 (file)
@@ -29,7 +29,7 @@ quiet_cmd_modules_install = INSTALL $@
 INSTALL_MOD_DIR ?= extra
 ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
 
-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+modinst_dir ?= $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 
 $(modules):
        $(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
index ddb3e8a8d9bd4cf0ae9c9e77f719c7009035ddda..3aa60791f84d687eeab11e5b437e9e37c039d32c 100644 (file)
@@ -18,6 +18,15 @@ config SECURITY_DMESG_RESTRICT
 
          If you are unsure how to answer this question, answer N.
 
+config SECURITY_PERF_EVENTS_RESTRICT
+       bool "Restrict unprivileged use of performance events"
+       depends on PERF_EVENTS
+       help
+         If you say Y here, the kernel.perf_event_paranoid sysctl
+         will be set to 3 by default, and no unprivileged use of the
+         perf_event_open syscall will be permitted unless it is
+         changed.
+
 config SECURITY
        bool "Enable different security models"
        depends on SYSFS
@@ -136,6 +145,7 @@ config HAVE_ARCH_HARDENED_USERCOPY
 config HARDENED_USERCOPY
        bool "Harden memory copies between kernel and userspace"
        depends on HAVE_ARCH_HARDENED_USERCOPY
+       depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
        select BUG
        help
          This option checks for obviously wrong memory regions when
index 48071ed7c445d025fa4ae57c12f032bfa916521f..7fa251aea32f88d3bd50be9d06b890991d1eec9e 100644 (file)
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -73,6 +77,13 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
 {
        struct user_namespace *ns = targ_ns;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+       if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+               return 0;
+       if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+               return 0;
+#endif
+
        /* See if cred has the capability in the target user namespace
         * by examining the target user namespace and all of the target
         * user namespace's parents.
index cccbf3068cdca800eb53e49caf1d29de9d93b035..45d927ab807d82425b470d1e597b786fa91b7259 100644 (file)
@@ -220,7 +220,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
         */
        BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
 
-       audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
+       audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
        audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
 
        switch (a->type) {
@@ -294,7 +294,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
        case LSM_AUDIT_DATA_TASK: {
                struct task_struct *tsk = a->u.tsk;
                if (tsk) {
-                       pid_t pid = task_pid_nr(tsk);
+                       pid_t pid = task_tgid_nr(tsk);
                        if (pid) {
                                char comm[sizeof(tsk->comm)];
                                audit_log_format(ab, " opid=%d ocomm=", pid);
index d0cfaa9f19d08034a3c3600d8381ed166a369388..94a0bfc748d175adbec213a231532e5dc7947bba 100644 (file)
@@ -420,6 +420,7 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
                !strcmp(sb->s_type->name, "sysfs") ||
                !strcmp(sb->s_type->name, "pstore") ||
                !strcmp(sb->s_type->name, "debugfs") ||
+               !strcmp(sb->s_type->name, "tracefs") ||
                !strcmp(sb->s_type->name, "rootfs");
 }
 
@@ -3653,6 +3654,38 @@ static int selinux_kernel_module_request(char *kmod_name)
                            SYSTEM__MODULE_REQUEST, &ad);
 }
 
+static int selinux_kernel_module_from_file(struct file *file)
+{
+       struct common_audit_data ad;
+       struct inode_security_struct *isec;
+       struct file_security_struct *fsec;
+       struct inode *inode;
+       u32 sid = current_sid();
+       int rc;
+
+       /* init_module */
+       if (file == NULL)
+               return avc_has_perm(sid, sid, SECCLASS_SYSTEM,
+                                       SYSTEM__MODULE_LOAD, NULL);
+
+       /* finit_module */
+       ad.type = LSM_AUDIT_DATA_PATH;
+       ad.u.path = file->f_path;
+
+       inode = file_inode(file);
+       isec = inode->i_security;
+       fsec = file->f_security;
+
+       if (sid != fsec->sid) {
+               rc = avc_has_perm(sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
+               if (rc)
+                       return rc;
+       }
+
+       return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM,
+                               SYSTEM__MODULE_LOAD, &ad);
+}
+
 static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
 {
        return current_has_perm(p, PROCESS__SETPGID);
@@ -5943,6 +5976,7 @@ static struct security_hook_list selinux_hooks[] = {
        LSM_HOOK_INIT(kernel_act_as, selinux_kernel_act_as),
        LSM_HOOK_INIT(kernel_create_files_as, selinux_kernel_create_files_as),
        LSM_HOOK_INIT(kernel_module_request, selinux_kernel_module_request),
+       LSM_HOOK_INIT(kernel_module_from_file, selinux_kernel_module_from_file),
        LSM_HOOK_INIT(task_setpgid, selinux_task_setpgid),
        LSM_HOOK_INIT(task_getpgid, selinux_task_getpgid),
        LSM_HOOK_INIT(task_getsid, selinux_task_getsid),
index 5a4eef59aeff97676e8b2d2ef94bc45ca9315201..b393d29ae85713b85b7822f17571db7f51c5418b 100644 (file)
@@ -32,7 +32,7 @@ struct security_class_mapping secclass_map[] = {
            "setsockcreate", NULL } },
        { "system",
          { "ipc_info", "syslog_read", "syslog_mod",
-           "syslog_console", "module_request", NULL } },
+           "syslog_console", "module_request", "module_load", NULL } },
        { "capability",
          { "chown", "dac_override", "dac_read_search",
            "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap",
index 3628d3a868b669c9aa9267808533aaa5dc8c0d1c..31345376aa9b7ff78be9412c7b07866660bb9cad 100644 (file)
@@ -374,6 +374,32 @@ void avtab_hash_eval(struct avtab *h, char *tag)
               chain2_len_sum);
 }
 
+/*
+ * extended permissions compatibility. Make ToT Android kernels compatible
+ * with Android M releases
+ */
+#define AVTAB_OPTYPE_ALLOWED   0x1000
+#define AVTAB_OPTYPE_AUDITALLOW        0x2000
+#define AVTAB_OPTYPE_DONTAUDIT 0x4000
+#define AVTAB_OPTYPE           (AVTAB_OPTYPE_ALLOWED | \
+                               AVTAB_OPTYPE_AUDITALLOW | \
+                               AVTAB_OPTYPE_DONTAUDIT)
+#define AVTAB_XPERMS_OPTYPE    4
+
+#define avtab_xperms_to_optype(x) (x << AVTAB_XPERMS_OPTYPE)
+#define avtab_optype_to_xperms(x) (x >> AVTAB_XPERMS_OPTYPE)
+
+static unsigned int avtab_android_m_compat;
+
+static void avtab_android_m_compat_set(void)
+{
+       if (!avtab_android_m_compat) {
+               pr_info("SELinux:  Android master kernel running Android"
+                               " M policy in compatibility mode.\n");
+               avtab_android_m_compat = 1;
+       }
+}
+
 static uint16_t spec_order[] = {
        AVTAB_ALLOWED,
        AVTAB_AUDITDENY,
@@ -398,6 +424,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
        struct avtab_datum datum;
        struct avtab_extended_perms xperms;
        __le32 buf32[ARRAY_SIZE(xperms.perms.p)];
+       unsigned int android_m_compat_optype = 0;
        int i, rc;
        unsigned set;
 
@@ -488,6 +515,13 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
        key.target_class = le16_to_cpu(buf16[items++]);
        key.specified = le16_to_cpu(buf16[items++]);
 
+       if ((key.specified & AVTAB_OPTYPE) &&
+                       (vers == POLICYDB_VERSION_XPERMS_IOCTL)) {
+               key.specified = avtab_optype_to_xperms(key.specified);
+               android_m_compat_optype = 1;
+               avtab_android_m_compat_set();
+       }
+
        if (!policydb_type_isvalid(pol, key.source_type) ||
            !policydb_type_isvalid(pol, key.target_type) ||
            !policydb_class_isvalid(pol, key.target_class)) {
@@ -518,10 +552,22 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
                        printk(KERN_ERR "SELinux: avtab: truncated entry\n");
                        return rc;
                }
-               rc = next_entry(&xperms.driver, fp, sizeof(u8));
-               if (rc) {
-                       printk(KERN_ERR "SELinux: avtab: truncated entry\n");
-                       return rc;
+               if (avtab_android_m_compat ||
+                           ((xperms.specified != AVTAB_XPERMS_IOCTLFUNCTION) &&
+                           (xperms.specified != AVTAB_XPERMS_IOCTLDRIVER) &&
+                           (vers == POLICYDB_VERSION_XPERMS_IOCTL))) {
+                       xperms.driver = xperms.specified;
+                       if (android_m_compat_optype)
+                               xperms.specified = AVTAB_XPERMS_IOCTLDRIVER;
+                       else
+                               xperms.specified = AVTAB_XPERMS_IOCTLFUNCTION;
+                       avtab_android_m_compat_set();
+               } else {
+                       rc = next_entry(&xperms.driver, fp, sizeof(u8));
+                       if (rc) {
+                               printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+                               return rc;
+                       }
                }
                rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
                if (rc) {
@@ -607,15 +653,22 @@ int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
        buf16[0] = cpu_to_le16(cur->key.source_type);
        buf16[1] = cpu_to_le16(cur->key.target_type);
        buf16[2] = cpu_to_le16(cur->key.target_class);
-       buf16[3] = cpu_to_le16(cur->key.specified);
+       if (avtab_android_m_compat && (cur->key.specified & AVTAB_XPERMS) &&
+                   (cur->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER))
+               buf16[3] = cpu_to_le16(avtab_xperms_to_optype(cur->key.specified));
+       else
+               buf16[3] = cpu_to_le16(cur->key.specified);
        rc = put_entry(buf16, sizeof(u16), 4, fp);
        if (rc)
                return rc;
 
        if (cur->key.specified & AVTAB_XPERMS) {
-               rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
-               if (rc)
-                       return rc;
+               if (avtab_android_m_compat == 0) {
+                       rc = put_entry(&cur->datum.u.xperms->specified,
+                                       sizeof(u8), 1, fp);
+                       if (rc)
+                               return rc;
+               }
                rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
                if (rc)
                        return rc;
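
The compatibility shim above amounts to a 4-bit shift between the Android M "optype" encoding and the upstream extended-permissions bits: on read, an M-style key.specified such as AVTAB_OPTYPE_ALLOWED (0x1000) is shifted down to the corresponding AVTAB_XPERMS_* bit, and avtab_write_item() shifts it back so the written policy stays in M format. A worked illustration; the 0x0100 value is what the shift must land on (AVTAB_XPERMS_ALLOWED in avtab.h) for the shim to work:

	u16 m_specified = 0x1000;                               /* AVTAB_OPTYPE_ALLOWED in an M policy */
	u16 xperms = avtab_optype_to_xperms(m_specified);       /* 0x1000 >> 4 == 0x0100, AVTAB_XPERMS_ALLOWED */
	u16 back   = avtab_xperms_to_optype(xperms);            /* 0x0100 << 4 == 0x1000, written back as optype */
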
index 39a8bd842d0df73b216c89e5b15d10e9be667252..6e0a16c7176a17c6acc46eb8703d44ba9fa19bdc 100644 (file)
@@ -2356,12 +2356,15 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
        case EPERM:
        case EACCES:
                return scnprintf(msg, size,
-                "You may not have permission to collect %sstats.\n"
-                "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
-                " -1 - Not paranoid at all\n"
-                "  0 - Disallow raw tracepoint access for unpriv\n"
-                "  1 - Disallow cpu events for unpriv\n"
-                "  2 - Disallow kernel profiling for unpriv",
+                "You may not have permission to collect %sstats.\n\n"
+                "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
+                "which controls use of the performance events system by\n"
+                "unprivileged users (without CAP_SYS_ADMIN).\n\n"
+                "The default value is 1:\n\n"
+                "  -1: Allow use of (almost) all events by all users\n"
+                ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
+                ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
+                ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
                                 target->system_wide ? "system-wide " : "");
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",