Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
authorMark Brown <broonie@kernel.org>
Fri, 29 Aug 2014 20:38:17 +0000 (21:38 +0100)
committerMark Brown <broonie@kernel.org>
Fri, 29 Aug 2014 20:38:17 +0000 (21:38 +0100)
404 files changed:
Documentation/android.txt [new file with mode: 0644]
Documentation/cgroups/cgroups.txt
Documentation/cpu-freq/governors.txt
Documentation/filesystems/proc.txt
Documentation/networking/ip-sysctl.txt
Documentation/sync.txt [new file with mode: 0644]
Documentation/sysctl/vm.txt
Documentation/trace/ftrace.txt
android/configs/README [new file with mode: 0644]
android/configs/android-base.cfg [new file with mode: 0644]
android/configs/android-recommended.cfg [new file with mode: 0644]
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/Kconfig
arch/arm/common/Makefile
arch/arm/common/fiq_glue.S [new file with mode: 0644]
arch/arm/common/fiq_glue_setup.c [new file with mode: 0644]
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/fiq_glue.h [new file with mode: 0644]
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/include/asm/hardware/coresight.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/mach/mmc.h [new file with mode: 0644]
arch/arm/include/asm/rodata.h [new file with mode: 0644]
arch/arm/include/asm/smp.h
arch/arm/kernel/etm.c
arch/arm/kernel/ftrace.c
arch/arm/kernel/kgdb.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/mm/Makefile
arch/arm/mm/cache-l2x0.c
arch/arm/mm/cache-v6.S
arch/arm/mm/fault.c
arch/arm/mm/mmu.c
arch/arm/mm/rodata.c [new file with mode: 0644]
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/.gitignore
arch/arm64/boot/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/include/asm/cmpxchg.h
arch/x86/include/asm/idle.h
arch/x86/kernel/process.c
block/genhd.c
block/partition-generic.c
drivers/Kconfig
drivers/Makefile
drivers/base/power/main.c
drivers/char/Kconfig
drivers/char/Makefile
drivers/char/dcc_tty.c [new file with mode: 0644]
drivers/char/mem.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_interactive.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_stats.c
drivers/cpuidle/governors/menu.c
drivers/gpio/gpiolib.c
drivers/hid/hid-debug.c
drivers/hid/hid-input.c
drivers/hid/hid-multitouch.c
drivers/iio/industrialio-event.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/evdev.c
drivers/input/keycombo.c [new file with mode: 0644]
drivers/input/keyreset.c [new file with mode: 0644]
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/gpio_axis.c [new file with mode: 0644]
drivers/input/misc/gpio_event.c [new file with mode: 0644]
drivers/input/misc/gpio_input.c [new file with mode: 0644]
drivers/input/misc/gpio_matrix.c [new file with mode: 0644]
drivers/input/misc/gpio_output.c [new file with mode: 0644]
drivers/input/misc/keychord.c [new file with mode: 0644]
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/uid_stat.c [new file with mode: 0644]
drivers/mmc/card/Kconfig
drivers/mmc/card/block.c
drivers/mmc/core/Kconfig
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/core/sdio_io.c [changed mode: 0644->0755]
drivers/mtd/nand/Kconfig
drivers/net/ppp/Kconfig
drivers/net/ppp/Makefile
drivers/net/ppp/pppolac.c [new file with mode: 0644]
drivers/net/ppp/pppopns.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/wireless/Kconfig
drivers/of/fdt.c
drivers/power/power_supply_core.c
drivers/power/power_supply_sysfs.c
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/TODO [deleted file]
drivers/staging/android/android_alarm.h
drivers/staging/android/ashmem.c
drivers/staging/android/ashmem.h
drivers/staging/android/binder.c
drivers/staging/android/binder.h
drivers/staging/android/binder_trace.h
drivers/staging/android/fiq_debugger/Kconfig [new file with mode: 0644]
drivers/staging/android/fiq_debugger/Makefile [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_arm.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_priv.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_watchdog.c [new file with mode: 0644]
drivers/staging/android/fiq_debugger/fiq_watchdog.h [new file with mode: 0644]
drivers/staging/android/ion/Kconfig [new file with mode: 0644]
drivers/staging/android/ion/Makefile [new file with mode: 0644]
drivers/staging/android/ion/compat_ion.c [new file with mode: 0644]
drivers/staging/android/ion/compat_ion.h [new file with mode: 0644]
drivers/staging/android/ion/ion.c [new file with mode: 0644]
drivers/staging/android/ion/ion.h [new file with mode: 0644]
drivers/staging/android/ion/ion_carveout_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_chunk_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_cma_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_dummy_driver.c [new file with mode: 0644]
drivers/staging/android/ion/ion_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_page_pool.c [new file with mode: 0644]
drivers/staging/android/ion/ion_priv.h [new file with mode: 0644]
drivers/staging/android/ion/ion_system_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_test.c [new file with mode: 0644]
drivers/staging/android/ion/tegra/Makefile [new file with mode: 0644]
drivers/staging/android/ion/tegra/tegra_ion.c [new file with mode: 0644]
drivers/staging/android/lowmemorykiller.c
drivers/staging/android/sw_sync.h
drivers/staging/android/sync.c
drivers/staging/android/sync.h
drivers/staging/android/uapi/android_alarm.h [new file with mode: 0644]
drivers/staging/android/uapi/ashmem.h [new file with mode: 0644]
drivers/staging/android/uapi/binder.h [new file with mode: 0644]
drivers/staging/android/uapi/ion.h [new file with mode: 0644]
drivers/staging/android/uapi/ion_test.h [new file with mode: 0644]
drivers/staging/android/uapi/sw_sync.h [new file with mode: 0644]
drivers/staging/android/uapi/sync.h [new file with mode: 0644]
drivers/switch/Kconfig [new file with mode: 0644]
drivers/switch/Makefile [new file with mode: 0644]
drivers/switch/switch_class.c [new file with mode: 0644]
drivers/switch/switch_gpio.c [new file with mode: 0644]
drivers/tty/serial/serial_core.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/Makefile
drivers/usb/gadget/android.c [new file with mode: 0644]
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_accessory.c [new file with mode: 0644]
drivers/usb/gadget/f_audio_source.c [new file with mode: 0644]
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_mtp.c [new file with mode: 0644]
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/u_serial.c
drivers/usb/gadget/udc-core.c
drivers/usb/phy/Kconfig
drivers/usb/phy/Makefile
drivers/usb/phy/otg-wakelock.c [new file with mode: 0644]
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/adf/Kconfig [new file with mode: 0644]
drivers/video/adf/Makefile [new file with mode: 0644]
drivers/video/adf/adf.c [new file with mode: 0644]
drivers/video/adf/adf.h [new file with mode: 0644]
drivers/video/adf/adf_client.c [new file with mode: 0644]
drivers/video/adf/adf_fbdev.c [new file with mode: 0644]
drivers/video/adf/adf_fops.c [new file with mode: 0644]
drivers/video/adf/adf_fops.h [new file with mode: 0644]
drivers/video/adf/adf_fops32.c [new file with mode: 0644]
drivers/video/adf/adf_fops32.h [new file with mode: 0644]
drivers/video/adf/adf_format.c [new file with mode: 0644]
drivers/video/adf/adf_memblock.c [new file with mode: 0644]
drivers/video/adf/adf_sysfs.c [new file with mode: 0644]
drivers/video/adf/adf_sysfs.h [new file with mode: 0644]
drivers/video/adf/adf_trace.h [new file with mode: 0644]
drivers/w1/masters/ds2482.c
fs/cifs/transport.c
fs/eventpoll.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/inode.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4proc.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/pstore/ram.c
fs/select.c
fs/timerfd.c
include/linux/Kbuild [new file with mode: 0644]
include/linux/alarmtimer.h
include/linux/amba/mmci.h
include/linux/android_aid.h [new file with mode: 0644]
include/linux/cgroup.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/debug_locks.h
include/linux/freezer.h
include/linux/gpio_event.h [new file with mode: 0644]
include/linux/hid.h
include/linux/if_pppolac.h [new file with mode: 0644]
include/linux/if_pppopns.h [new file with mode: 0644]
include/linux/if_pppox.h
include/linux/ipv6.h
include/linux/kernel.h
include/linux/keychord.h [new file with mode: 0644]
include/linux/keycombo.h [new file with mode: 0644]
include/linux/keyreset.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/host.h
include/linux/mmc/pm.h
include/linux/mmc/sdio_func.h [changed mode: 0644->0755]
include/linux/netfilter/xt_qtaguid.h [new file with mode: 0644]
include/linux/netfilter/xt_quota2.h [new file with mode: 0644]
include/linux/nmi.h
include/linux/of_fdt.h
include/linux/platform_data/ds2482.h [new file with mode: 0644]
include/linux/power_supply.h
include/linux/pstore_ram.h
include/linux/sched.h
include/linux/security.h
include/linux/serial_core.h
include/linux/switch.h [new file with mode: 0644]
include/linux/uid_stat.h [new file with mode: 0644]
include/linux/usb/f_accessory.h [new file with mode: 0644]
include/linux/usb/f_mtp.h [new file with mode: 0644]
include/linux/wakelock.h [new file with mode: 0644]
include/linux/wakeup_reason.h [new file with mode: 0644]
include/linux/wifi_tiwlan.h [new file with mode: 0644]
include/linux/wlan_plat.h [new file with mode: 0644]
include/net/activity_stats.h [new file with mode: 0644]
include/net/addrconf.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/sco.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/ip.h
include/net/ipv6.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/ping.h
include/net/tcp.h
include/net/transp_v6.h
include/trace/events/cpufreq_interactive.h [new file with mode: 0644]
include/trace/events/gpu.h [new file with mode: 0644]
include/trace/events/mmc.h [new file with mode: 0644]
include/trace/events/power.h
include/uapi/linux/eventpoll.h
include/uapi/linux/if_pppolac.h [new file with mode: 0644]
include/uapi/linux/if_pppopns.h [new file with mode: 0644]
include/uapi/linux/if_pppox.h
include/uapi/linux/input.h
include/uapi/linux/ipv6.h
include/uapi/linux/keychord.h [new file with mode: 0644]
include/uapi/linux/msdos_fs.h
include/uapi/linux/netfilter/xt_IDLETIMER.h
include/uapi/linux/netfilter/xt_socket.h
include/uapi/linux/nl80211.h
include/uapi/linux/prctl.h
include/uapi/linux/sockios.h
include/uapi/linux/usb/f_accessory.h [new file with mode: 0644]
include/uapi/linux/usb/f_mtp.h [new file with mode: 0644]
include/uapi/video/adf.h [new file with mode: 0644]
include/video/adf.h [new file with mode: 0644]
include/video/adf_client.h [new file with mode: 0644]
include/video/adf_fbdev.h [new file with mode: 0644]
include/video/adf_format.h [new file with mode: 0644]
include/video/adf_memblock.h [new file with mode: 0644]
init/Kconfig
kernel/cgroup.c
kernel/cpu.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_io.c
kernel/exit.c
kernel/fork.c
kernel/freezer.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/pm.c
kernel/lockdep.c
kernel/panic.c
kernel/power/Kconfig
kernel/power/Makefile
kernel/power/process.c
kernel/power/suspend.c
kernel/power/suspend_time.c [new file with mode: 0644]
kernel/power/wakelock.c
kernel/power/wakeup_reason.c [new file with mode: 0644]
kernel/sched/core.c
kernel/signal.c
kernel/sys.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/gpu-traces.c [new file with mode: 0644]
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_output.c
kernel/watchdog.c
lib/Kconfig.debug
mm/madvise.c
mm/mempolicy.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/page_alloc.c
mm/page_io.c
mm/shmem.c
mm/vmscan.c
net/Kconfig
net/Makefile
net/activity_stats.c [new file with mode: 0644]
net/bluetooth/af_bluetooth.c
net/bluetooth/amp.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c [changed mode: 0644->0755]
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br_device.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_output.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/sysfs_net_ipv4.c [new file with mode: 0644]
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs_core.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/ping.c [new file with mode: 0644]
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_internal.h [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.h [new file with mode: 0644]
net/netfilter/xt_quota2.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/rfkill/Kconfig
net/rfkill/core.c
net/sunrpc/sched.c
net/unix/af_unix.c
net/wireless/Kconfig
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
scripts/Makefile.lib
scripts/kconfig/confdata.c
scripts/kconfig/expr.h
scripts/kconfig/lkc.h
scripts/kconfig/symbol.c
security/capability.c
security/commoncap.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc.h
security/selinux/include/classmap.h

diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644 (file)
index 0000000..0f40a78
--- /dev/null
@@ -0,0 +1,121 @@
+                               =============
+                               A N D R O I D
+                               =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+PSTORE_CONSOLE
+PSTORE_RAM
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
index 638bf17ff86965a561b5cc258451ea53bd752f1a..61dc0ec5c9a9b11986f625cdc1e891e272cd5f06 100644 (file)
@@ -598,6 +598,15 @@ is completely unused; @cgrp->parent is still valid. (Note - can also
 be called for a newly-created cgroup if an error occurs after this
 subsystem's create() method has been called for the new cgroup).
 
+int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation.  Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
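
A minimal sketch of a subsystem wiring up this hook (assuming the callback
lives in struct cgroup_subsys next to can_attach, as the text above implies;
the subsystem name and its capability policy are hypothetical):

    #include <linux/cgroup.h>
    #include <linux/capability.h>

    /* Hypothetical policy: tasks with CAP_SYS_NICE may be attached even
     * when the default root/same-user check would fail. */
    static int example_allow_attach(struct cgroup *cgrp,
                                    struct cgroup_taskset *tset)
    {
            if (!capable(CAP_SYS_NICE))
                    return -EACCES;         /* abort the attach */
            return 0;                       /* permit the attach */
    }

    struct cgroup_subsys example_subsys = {
            .name           = "example",
            .allow_attach   = example_allow_attach,
    };
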
 int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
index 219970ba54b736b2ca6e9d841645b9e71d47df1d..875eecbed8c869c18ee87caf4aa671851e93523f 100644 (file)
@@ -28,6 +28,7 @@ Contents:
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -218,6 +219,90 @@ a decision on when to decrease the frequency while running in any
 speed. Load for frequency increase is still evaluated every
 sampling rate.
 
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value.  In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target.  The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds.  Colons can be used between the speeds and associated
+target loads for readability.  For example:
+
+   85 1000000:90 1700000:99
+
+targets CPU load 85% below 1GHz, 90% from 1GHz up to 1.7GHz, and 99%
+at 1.7GHz and above.  If speeds are specified, they must appear in
+ascending order.  Higher target load values are typically specified
+for higher speeds, that is, target load values also usually appear in
+ascending order.  The default is a target load of 90% for all speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses a delay of 80000 uS up to CPU speed 1.3 GHz, a delay of 200000 uS
+from 1.3 GHz up to 1.5 GHz, and a delay of 40000 uS at 1.5 GHz and
+above.  If speeds are specified, they must appear in ascending order.
+Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle.  A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.)  Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum.  For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed.  For example, if timer_rate is 20000 uS and
+timer_slack is 10000 uS, then timers will be deferred for up to 30 msec
+when not at the lowest speed.  A value of -1 means defer timers
+indefinitely at all speeds.  Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+
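
As a usage illustration, these tunables are plain sysfs attributes; a small
helper in C might set them as follows (a sketch only: the global tunable
directory /sys/devices/system/cpu/cpufreq/interactive is assumed, and error
handling is minimal):

    #include <stdio.h>

    /* Sketch: write one interactive-governor tunable via sysfs. */
    static int set_tunable(const char *name, const char *value)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpufreq/interactive/%s", name);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s", value);
            return fclose(f);
    }

    int main(void)
    {
            /* target 85% load below 1GHz, 90% up to 1.7GHz, 99% above */
            set_tunable("target_loads", "85 1000000:90 1700000:99");
            set_tunable("above_hispeed_delay",
                        "80000 1300000:200000 1500000:40000");
            return 0;
    }
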
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
index 954eab8c7fecd6b5368735dc632c16ad502d0388..7ce8ef7d764a8e4c128794085f98a09b80189cc0 100644 (file)
@@ -369,6 +369,8 @@ is not associated with a file:
  [stack:1001]             = the stack of the thread with tid 1001
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]            = an anonymous mapping that has been
+                            named by userspace
 
  or if empty, the mapping is anonymous.
 
@@ -419,6 +421,7 @@ KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 Locked:              374 kB
 VmFlags: rd ex mr mw me de
+Name:           name from userspace
 
 the first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -469,6 +472,9 @@ Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, flags may
 vanish or, conversely, new ones may be added.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
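
For reference, the userspace side of this naming is a prctl(); a hedged
sketch follows (PR_SET_VMA and PR_SET_VMA_ANON_NAME are the constants this
patchset adds to include/uapi/linux/prctl.h; the fallback values below are
assumptions for older headers):

    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA                      /* values assumed if absent */
    #define PR_SET_VMA              0x53564d41
    #define PR_SET_VMA_ANON_NAME    0
    #endif

    /* Sketch: name an anonymous mapping so it shows as [anon:my heap].
     * The name string must stay valid for the life of the mapping, since
     * the kernel keeps the user pointer (a string literal is fine). */
    static void *named_anon_map(size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p != MAP_FAILED)
                    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                          (unsigned long)p, len, (unsigned long)"my heap");
            return p;
    }
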
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
index a59ee432a98f4b545fc322765cb5bf837c886af2..99d2164180613804ecc3e210879ff96760636e16 100644 (file)
@@ -22,6 +22,15 @@ ip_no_pmtu_disc - BOOLEAN
 min_pmtu - INTEGER
        default 552 - minimum discovered Path MTU
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv4 reply packets that are not
+       associated with a socket (for example, TCP RSTs or ICMP echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to. Similarly affects the fwmark
+       used by internal routing lookups triggered by incoming packets, such as
+       the ones used for Path MTU Discovery.
+       Default: 0
+
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
@@ -468,6 +477,16 @@ tcp_fastopen - INTEGER
 
        See include/net/tcp.h and the code for more details.
 
+tcp_fwmark_accept - BOOLEAN
+       If set, incoming connections to listening sockets that do not have a
+       socket mark will set the mark of the accepting socket to the fwmark of
+       the incoming SYN packet. This will cause all packets on that connection
+       (starting from the first SYNACK) to be sent with that fwmark. The
+       listening socket's mark is unchanged. Listening sockets that already
+       have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+       unaffected.
+       Default: 0
+
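
As an illustration of tcp_fwmark_accept, a server can observe the mark an
accepted connection inherited from its SYN (a sketch; assumes the kernel
permits reading SO_MARK via getsockopt and that the listening socket had no
mark of its own):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Sketch: after accept(), inspect the fwmark the connection inherited
     * from the incoming SYN. Nonzero only when tcp_fwmark_accept is set
     * and the SYN carried a mark. */
    static void print_inherited_mark(int accepted_fd)
    {
            unsigned int mark = 0;
            socklen_t len = sizeof(mark);

            if (getsockopt(accepted_fd, SOL_SOCKET, SO_MARK,
                           &mark, &len) == 0)
                    printf("inherited fwmark: %u\n", mark);
    }
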
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
        will be retransmitted. Should not be higher than 255. Default value
@@ -1093,6 +1112,15 @@ conf/all/forwarding - BOOLEAN
 proxy_ndp - BOOLEAN
        Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+       Controls the fwmark of kernel-generated IPv6 reply packets that are not
+       associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
+       If unset, these packets have a fwmark of zero. If set, they have the
+       fwmark of the packet they are replying to. Similarly affects the fwmark
+       used by internal routing lookups triggered by incoming packets, such as
+       the ones used for Path MTU Discovery.
+       Default: 0
+
 conf/interface/*:
        Change special settings per interface.
 
diff --git a/Documentation/sync.txt b/Documentation/sync.txt
new file mode 100644 (file)
index 0000000..a2d05e7
--- /dev/null
@@ -0,0 +1,75 @@
+Motivation:
+
+In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display)
+a consumer of a buffer needs to know when the producer has finished producing
+it.  Likewise the producer needs to know when the consumer is finished with the
+buffer so it can reuse it.  A particular buffer may be consumed by multiple
+consumers which will retain the buffer for different amounts of time.  In
+addition, a consumer may consume multiple buffers atomically.
+The sync framework adds an API which allows synchronization between the
+producers and consumers in a generic way while also allowing platforms which
+have shared hardware synchronization primitives to exploit them.
+
+Goals:
+       * provide a generic API for expressing synchronization dependencies
+       * allow drivers to exploit hardware synchronization between hardware
+         blocks
+       * provide a userspace API that allows a compositor to manage
+         dependencies
+       * provide rich telemetry data to allow debugging slowdowns and stalls
+         of the graphics pipeline
+
+Objects:
+       * sync_timeline
+       * sync_pt
+       * sync_fence
+
+sync_timeline:
+
+A sync_timeline is an abstract monotonically increasing counter. In general,
+each driver/hardware block context will have one of these.  They can be backed
+by the appropriate hardware or rely on the generic sw_sync implementation.
+Timelines are only ever created through their specific implementations
+(e.g. sw_sync).
+
+sync_pt:
+
+A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
+have a single timeline parent.  They have 3 states: active, signaled, and error.
+They start in active state and transition, once, to either signaled (when the
+timeline counter advances beyond the sync_pt’s value) or error state.
+
+sync_fence:
+
+Sync_fences are the primary primitives used by drivers to coordinate
+synchronization of their buffers.  They are a collection of sync_pts which may
+or may not have the same timeline parent.  A sync_pt can only exist in one fence
+and the fence's list of sync_pts is immutable once created.  Fences can be
+waited on synchronously or asynchronously.  Two fences can also be merged to
+create a third fence containing a copy of the two fences’ sync_pts.  Fences are
+backed by file descriptors to allow userspace to coordinate the display pipeline
+dependencies.
+
+Use:
+
+A driver implementing sync support should have a work submission function which:
+     * takes a fence argument specifying when to begin work
+     * asynchronously queues that work to kick off when the fence is signaled
+     * returns a fence to indicate when its work will be done
+     * signals the returned fence once the work is completed
+
+Consider an imaginary display driver that has the following API:
+
+    /*
+     * assumes buf is ready to be displayed.
+     * blocks until the buffer is on screen.
+     */
+    void display_buffer(struct dma_buf *buf);
+
+The new API will become:
+
+    /*
+     * will display buf when fence is signaled.
+     * returns immediately with a fence that will signal when buf
+     * is no longer displayed.
+     */
+    struct sync_fence *display_buffer(struct dma_buf *buf,
+                                      struct sync_fence *fence);
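
Since fences are backed by file descriptors, a userspace consumer can wait on
one with ordinary fd polling; a minimal sketch (assuming, as the sync
driver's fd semantics suggest, that the fence fd reports POLLIN once every
sync_pt in it has signaled):

    #include <poll.h>

    /* Sketch: block until a sync_fence fd signals, with a timeout in ms.
     * Returns 1 when signaled, 0 on timeout, -1 on error. */
    static int wait_fence(int fence_fd, int timeout_ms)
    {
            struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
            int ret = poll(&pfd, 1, timeout_ms);

            if (ret > 0)
                    return (pfd.revents & POLLIN) ? 1 : -1;
            return ret;
    }
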
index dcc75a9ed91961a612e88e6392ab3ab565b09d9c..b81fca90f7fe4acdaaf2f07682696294e4472b4e 100644 (file)
@@ -29,6 +29,7 @@ Currently, these files are in /proc/sys/vm:
 - dirty_writeback_centisecs
 - drop_caches
 - extfrag_threshold
+- extra_free_kbytes
 - hugepages_treat_as_movable
 - hugetlb_shm_group
 - laptop_mode
@@ -198,6 +199,21 @@ fragmentation index is <= extfrag_threshold. The default value is 500.
 
 ==============================================================
 
+extra_free_kbytes
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low-latency memory allocations
+and have a bounded burstiness in memory allocations.  For example, a
+realtime application that receives and transmits network traffic
+(causing in-kernel memory allocations) with a maximum total message burst
+size of 200MB may need 200MB of extra free memory to avoid direct
+reclaim related latencies.
+
+==============================================================
+
 hugepages_treat_as_movable
 
 This parameter is only useful when kernelcore= is specified at boot time to
index bfe8c29b1f1d84cf5c6a9d71c3c69b20f2bdf12f..29064c3bfcd1bae1b0f39d6078a3f28e34ed3803 100644 (file)
@@ -2013,6 +2013,35 @@ will produce:
  1)   1.449 us    |             }
 
 
+You can disable the hierarchical function call formatting and instead print a
+flat list of function entry and return events.  This uses the format described
+in the Output Formatting section and respects all the trace options that
+control that formatting.  Hierarchical formatting is the default.
+
+       hierarchical: echo nofuncgraph-flat > trace_options
+       flat: echo funcgraph-flat > trace_options
+
+  For example:
+
+  # tracer: function_graph
+  #
+  # entries-in-buffer/entries-written: 68355/68355   #P:2
+  #
+  #                              _-----=> irqs-off
+  #                             / _----=> need-resched
+  #                            | / _---=> hardirq/softirq
+  #                            || / _--=> preempt-depth
+  #                            ||| /     delay
+  #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+  #              | |       |   ||||       |         |
+                sh-1806  [001] d...   198.843443: graph_ent: func=_raw_spin_lock
+                sh-1806  [001] d...   198.843445: graph_ent: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843447: graph_ret: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843449: graph_ret: func=_raw_spin_lock
+                sh-1806  [001] d..1   198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
+                sh-1806  [001] d...   198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
+
+
 You might find other useful features for this tracer in the
 following "dynamic ftrace" section such as tracing only specific
 functions or tasks.
diff --git a/android/configs/README b/android/configs/README
new file mode 100644 (file)
index 0000000..8798731
--- /dev/null
@@ -0,0 +1,15 @@
+The files in this directory are meant to be used as a base for an Android
+kernel config. All devices should have the options in android-base.cfg enabled.
+While not mandatory, the options in android-recommended.cfg enable advanced
+Android features.
+
+Assuming you already have a minimalist defconfig for your device, a possible
+way to enable these options would be:
+
+     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
+
+This will generate a .config that can then be used to save a new defconfig or
+compile a new kernel with Android features enabled.
+
+Because there is no tool to consistently generate these config fragments,
+let's keep them alphabetically sorted rather than randomly ordered.
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
new file mode 100644 (file)
index 0000000..5b88848
--- /dev/null
@@ -0,0 +1,139 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_INET_LRO is not set
+# CONFIG_MODULES is not set
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ASHMEM=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_EMBEDDED=y
+CONFIG_FB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_NAT=y
+CONFIG_NO_HZ=y
+CONFIG_PACKET=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PPP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PREEMPT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_STAGING=y
+CONFIG_SWITCH=y
+CONFIG_SYNC=y
+CONFIG_SYSVIPC=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_XFRM_USER=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
new file mode 100644 (file)
index 0000000..960b9de
--- /dev/null
@@ -0,0 +1,121 @@
+#  KEEP ALPHABETICALLY SORTED
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_PM_WAKELOCKS_GC is not set
+# CONFIG_VT is not set
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_COMPACTION=y
+CONFIG_DM_UEVENT=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_ION=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KSM=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGITECH_FF=y
+CONFIG_MD=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_POWER_SUPPLY=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
+CONFIG_TIMER_STATS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UHID=y
+CONFIG_UID_STAT=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_USBNET=y
+CONFIG_VFAT_FS=y
index fd7c255156bb9b62ac7a7cca5ca21de688e8e94f..bd4e7c31e9e7408f6f93480e3dae8c0c1167fa4e 100644 (file)
@@ -1949,6 +1949,15 @@ config XEN
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+       bool "Force flush the console on restart"
+       help
+         If the console is locked while the system is rebooted, the messages
+         in the temporary logbuffer would not have propagated to all the
+         console drivers. This option forces the console lock to be
+         released if it failed to be acquired, which will cause all the
+         pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
@@ -1978,6 +1987,21 @@ config DEPRECATED_PARAM_STRUCT
          This was deprecated in 2001 and announced to live on for 5 years.
          Some old boot loaders still use this way.
 
+config BUILD_ARM_APPENDED_DTB_IMAGE
+       bool "Build a concatenated zImage/dtb by default"
+       depends on OF
+       help
+         Enabling this option will cause a concatenated zImage and DTB to
+         be built by default (instead of a standalone zImage).  The image
+         will be built in arch/arm/boot/zImage-dtb.<dtb name>
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAME
+       string "Default dtb name"
+       depends on BUILD_ARM_APPENDED_DTB_IMAGE
+       help
+         Name of the dtb to append when building a concatenated
+         zImage/dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
 config ZBOOT_ROM_TEXT
index 1d41908d5cda0644a31a9048c882369f21db235b..21cc8a76598873aea2fb0dc2160380473548364d 100644 (file)
@@ -63,6 +63,27 @@ config DEBUG_USER
              8 - SIGSEGV faults
             16 - SIGBUS faults
 
+config DEBUG_RODATA
+       bool "Write protect kernel text section"
+       default n
+       depends on DEBUG_KERNEL && MMU
+       ---help---
+         Mark the kernel text section as write-protected in the pagetables,
+         in order to catch accidental (and incorrect) writes to such const
+         data. This will cause the kernel, plus up to 4MB more, to be
+         mapped as pages instead of sections, which will increase TLB
+         pressure.
+         If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+       bool "Testcase for the DEBUG_RODATA feature"
+       depends on DEBUG_RODATA
+       default n
+       ---help---
+         This option enables a testcase for the DEBUG_RODATA
+         feature.
+         If in doubt, say "N".
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
        bool "Kernel low-level debugging functions (read help!)"
index 70bc19e2274f090c9f4efec160c80d4b05ac8781..9d36200374f056aa7daee3108c9e8ca5279f7a14 100644 (file)
@@ -265,6 +265,8 @@ libs-y                              := arch/arm/lib/ $(libs-y)
 # Default target when executing plain make
 ifeq ($(CONFIG_XIP_KERNEL),y)
 KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb.$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAME)
 else
 KBUILD_IMAGE := zImage
 endif
index 84aa2caf07ed203fb810220258401a1b51f7cab3..085bb96493a373d7c2e2a17b20bf49f17b173f08 100644 (file)
@@ -14,6 +14,7 @@
 ifneq ($(MACHINE),)
 include $(srctree)/$(MACHINE)/Makefile.boot
 endif
+include $(srctree)/arch/arm/boot/dts/Makefile
 
 # Note: the following conditions must always be true:
 #   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
index f6e34be012ff816b9bf0c9086d18d03f9b55fcbd..a8264aa9b03a332a7aeec0d8a08b99e717a8651e 100644 (file)
@@ -714,6 +714,8 @@ __armv7_mmu_cache_on:
                bic     r6, r6, #1 << 31        @ 32-bit translation system
                bic     r6, r6, #3 << 0         @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
+               mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
+               mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
                mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
index 9353184d730dfda864c85ec180b906b9ed575681..ce01364a96e3b37ae4b7e2147515834131ca2922 100644 (file)
@@ -17,3 +17,7 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
        bool
+
+config FIQ_GLUE
+       bool
+       select FIQ
index 462cd580fc2d765bb021e78bc8e79a1b34dcaf47..505c479202b636ceb5166fb2eaf86df6cbdb3621 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-y                          += firmware.o
 
+obj-$(CONFIG_FIQ_GLUE)         += fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST)             += icst.o
 obj-$(CONFIG_SA1111)           += sa1111.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644 (file)
index 0000000..24b42ce
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+               .text
+
+               .global fiq_glue_end
+
+               /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+               /* store pc, cpsr from previous mode, reserve space for spsr */
+               mrs     r12, spsr
+               sub     lr, lr, #4
+               subs    r10, #1
+               bne     nested_fiq
+
+               str     r12, [sp, #-8]!
+               str     lr, [sp, #-4]!
+
+               /* store r8-r14 from previous mode */
+               sub     sp, sp, #(7 * 4)
+               stmia   sp, {r8-r14}^
+               nop
+
+               /* store r0-r7 from previous mode */
+               stmfd   sp!, {r0-r7}
+
+               /* setup func(data,regs) arguments */
+               mov     r0, r9
+               mov     r1, sp
+               mov     r3, r8
+
+               mov     r7, sp
+
+               /* Get sp and lr from non-user modes */
+               and     r4, r12, #MODE_MASK
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode
+
+               mov     r7, sp
+               orr     r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+               msr     cpsr_c, r4
+               str     sp, [r7, #(4 * 13)]
+               str     lr, [r7, #(4 * 14)]
+               mrs     r5, spsr
+               str     r5, [r7, #(4 * 17)]
+
+               cmp     r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               /* use fiq stack if we reenter this mode */
+               subne   sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+               msr     cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               mov     r2, sp
+               sub     sp, r7, #12
+               stmfd   sp!, {r2, ip, lr}
+               /* call func(data,regs) */
+               blx     r3
+               ldmfd   sp, {r2, ip, lr}
+               mov     sp, r2
+
+               /* restore/discard saved state */
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode_exit
+
+               msr     cpsr_c, r4
+               ldr     sp, [r7, #(4 * 13)]
+               ldr     lr, [r7, #(4 * 14)]
+               msr     spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+               msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+               ldmfd   sp!, {r0-r7}
+               ldr     lr, [sp, #(4 * 7)]
+               ldr     r12, [sp, #(4 * 8)]
+               add     sp, sp, #(10 * 4)
+exit_fiq:
+               msr     spsr_cxsf, r12
+               add     r10, #1
+               cmp     r11, #0
+               moveqs  pc, lr
+               bx      r11 /* jump to custom fiq return function */
+
+nested_fiq:
+               orr     r12, r12, #(PSR_F_BIT)
+               b       exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+               stmfd           sp!, {r4}
+               mrs             r4, cpsr
+               msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+               movs            r8, r0
+               mov             r9, r1
+               mov             sp, r2
+               mov             r11, r3
+               moveq           r10, #0
+               movne           r10, #1
+               msr             cpsr_c, r4
+               ldmfd           sp!, {r4}
+               bx              lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644 (file)
index 0000000..8cb1b61
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+                          fiq_return_handler_t fiq_return_handler);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+       .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+       struct fiq_glue_handler *handler = info;
+       fiq_glue_setup(handler->fiq, handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+       int ret;
+       int cpu;
+
+       if (!handler || !handler->fiq)
+               return -EINVAL;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_stack) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+
+       for_each_possible_cpu(cpu) {
+               void *stack;
+               stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (WARN_ON(!stack)) {
+                       ret = -ENOMEM;
+                       goto err_alloc_fiq_stack;
+               }
+               per_cpu(fiq_stack, cpu) = stack;
+       }
+
+       ret = claim_fiq(&fiq_debugger_fiq_handler);
+       if (WARN_ON(ret))
+               goto err_claim_fiq;
+
+       current_handler = handler;
+       on_each_cpu(fiq_glue_setup_helper, handler, true);
+       set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+       mutex_unlock(&fiq_glue_lock);
+       return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+       for_each_possible_cpu(cpu) {
+               /* free_pages() ignores a zero address, so CPUs whose
+                * stacks were never allocated are skipped safely */
+               free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+                          THREAD_SIZE_ORDER);
+               per_cpu(fiq_stack, cpu) = NULL;
+       }
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+       return ret;
+}
+
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+       fiq_return_handler = fiq_return;
+       if (current_handler)
+               on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_return_handler) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+       fiq_glue_update_return_handler(fiq_return);
+       ret = 0;
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (WARN_ON(fiq_return_handler != fiq_return)) {
+               ret = -EINVAL;
+               goto err_inval;
+       }
+       fiq_glue_update_return_handler(NULL);
+       ret = 0;
+err_inval:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler, it will be called.
+ */
+void fiq_glue_resume(void)
+{
+       if (!current_handler)
+               return;
+       fiq_glue_setup(current_handler->fiq, current_handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
+       if (current_handler->resume)
+               current_handler->resume(current_handler);
+}
+
index 2059f019bef474468694246377dcf3f73b537f11..455e6637c881da255a7007a555585be8a9fbfdfa 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
+#include <asm/rodata.h>
 
 #define CACHE_COLOUR(vaddr)    ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644 (file)
index 0000000..a9e244f
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+       void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+       void (*resume)(struct fiq_glue_handler *h);
+};
+typedef void (*fiq_return_handler_t)(void);
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
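
A hedged sketch of a driver registering a handler through this interface
(the embedding pattern and the FIQ-context caveats follow from the API
above; the driver itself is hypothetical):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <asm/fiq_glue.h>

    /* Hypothetical driver state embedding the glue handler. */
    struct my_fiq_state {
            struct fiq_glue_handler handler;
            /* driver-private fields would follow */
    };

    static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
    {
            struct my_fiq_state *state =
                    container_of(h, struct my_fiq_state, handler);
            /* FIQ context: no locks, no allocation, keep this short */
            (void)state;
    }

    static struct my_fiq_state my_state = {
            .handler.fiq = my_fiq,
    };

    static int __init my_fiq_init(void)
    {
            return fiq_glue_register_handler(&my_state.handler);
    }
    early_initcall(my_fiq_init);
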
index 3d7351c844aac0ae2392d441796ce9904dcaf717..fe3ea776dc34267724f377465134e52b39434fed 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 7
+#define NR_IPI 8
 
 typedef struct {
        unsigned int __softirq_pending;
index 3b2c40b5bfa22de982fce4e79afc8ced0326dce1..0ca0f5a7c84b33774d6e80ab564103b15ebfdbf0 100644 (file)
@@ -66,6 +66,7 @@
 #define   L2X0_STNDBY_MODE_EN          (1 << 0)
 
 /* Registers shifts and masks */
+#define L2X0_CACHE_ID_REV_MASK         (0x3f)
 #define L2X0_CACHE_ID_PART_MASK                (0xf << 6)
 #define L2X0_CACHE_ID_PART_L210                (1 << 6)
 #define L2X0_CACHE_ID_PART_L310                (3 << 6)
 
 #define L2X0_WAY_SIZE_SHIFT            3
 
+#define REV_PL310_R2P0                         4
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
index ad774f37c47cda0f6201d7a7aad7d967cec4786b..fc53019c304b6c15c70f6302f54a17671ad1543d 100644 (file)
 #define TRACER_ACCESSED_BIT    0
 #define TRACER_RUNNING_BIT     1
 #define TRACER_CYCLE_ACC_BIT   2
+#define TRACER_TRACE_DATA_BIT  3
+#define TRACER_TIMESTAMP_BIT   4
+#define TRACER_BRANCHOUTPUT_BIT        5
+#define TRACER_RETURN_STACK_BIT        6
 #define TRACER_ACCESSED                BIT(TRACER_ACCESSED_BIT)
 #define TRACER_RUNNING         BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC       BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA      BIT(TRACER_TRACE_DATA_BIT)
+#define TRACER_TIMESTAMP       BIT(TRACER_TIMESTAMP_BIT)
+#define TRACER_BRANCHOUTPUT    BIT(TRACER_BRANCHOUTPUT_BIT)
+#define TRACER_RETURN_STACK    BIT(TRACER_RETURN_STACK_BIT)
 
 #define TRACER_TIMEOUT 10000
 
@@ -43,7 +51,7 @@
 #define ETMCTRL_POWERDOWN      1
 #define ETMCTRL_PROGRAM                (1 << 10)
 #define ETMCTRL_PORTSEL                (1 << 11)
-#define ETMCTRL_DO_CONTEXTID   (3 << 14)
+#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
 #define ETMCTRL_PORTMASK1      (7 << 4)
 #define ETMCTRL_PORTMASK2      (1 << 21)
 #define ETMCTRL_PORTMASK       (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
 #define ETMCTRL_DATA_DO_BOTH   (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
 #define ETMCTRL_BRANCH_OUTPUT  (1 << 8)
 #define ETMCTRL_CYCLEACCURATE  (1 << 12)
+#define ETMCTRL_TIMESTAMP_EN   (1 << 28)
+#define ETMCTRL_RETURN_STACK_EN        (1 << 29)
 
 /* ETM configuration code register */
 #define ETMR_CONFCODE          (0x04)
+#define ETMCCR_ETMIDR_PRESENT  BIT(31)
 
 /* ETM trace start/stop resource control register */
 #define ETMR_TRACESSCTRL       (0x18)
 #define ETMR_TRACEENCTRL       0x24
 #define ETMTE_INCLEXCL         BIT(24)
 #define ETMR_TRACEENEVT                0x20
-#define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT | \
-                               ETMCTRL_DATA_DO_ADDR | \
-                               ETMCTRL_BRANCH_OUTPUT | \
-                               ETMCTRL_DO_CONTEXTID)
+
+#define ETMR_VIEWDATAEVT       0x30
+#define ETMR_VIEWDATACTRL1     0x34
+#define ETMR_VIEWDATACTRL2     0x38
+#define ETMR_VIEWDATACTRL3     0x3c
+#define ETMVDC3_EXCLONLY       BIT(16)
+
+#define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT)
+
+#define ETMR_ID                        0x1e4
+#define ETMIDR_VERSION(x)      (((x) >> 4) & 0xff)
+#define ETMIDR_VERSION_3_1     0x21
+#define ETMIDR_VERSION_PFT_1_0 0x30
+
+#define ETMR_CCE               0x1e8
+#define ETMCCER_RETURN_STACK_IMPLEMENTED       BIT(23)
+#define ETMCCER_TIMESTAMPING_IMPLEMENTED       BIT(22)
+
+#define ETMR_TRACEIDR          0x200
 
 /* ETM management registers, "ETM Architecture", 3.5.24 */
 #define ETMMR_OSLAR    0x300
 #define ETBFF_TRIGIN           BIT(8)
 #define ETBFF_TRIGEVT          BIT(9)
 #define ETBFF_TRIGFL           BIT(10)
+#define ETBFF_STOPFL           BIT(12)
 
 #define etb_writel(t, v, x) \
        (writel_relaxed((v), (t)->etb_regs + (x)))
 #define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
 
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
-       do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+       do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+       do { etm_writel((t), (id), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
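
The hunk above is cut off mid-macro, but the shape of the change is clear: the ETB accessors keep their single-instance form while etm_lock()/etm_unlock() (and the etm_writel()/etm_readl() they are built on) gain an id argument selecting one of several ETMs. Given the void __iomem **etm_regs array introduced in etm.c below, the updated accessors presumably index the per-ETM register bank along these lines (an assumption; their definitions are not visible in this hunk):

    /* Assumed shape of the id-aware accessors, not shown in this hunk: */
    #define etm_writel(t, id, v, x) \
            (writel_relaxed((v), (t)->etm_regs[(id)] + (x)))
    #define etm_readl(t, id, x) \
            (readl_relaxed((t)->etm_regs[(id)] + (x)))
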
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 53c15dec7af6aa09faee9b1851782f9244365e56..809203a4b71befacd5ce807c7c3caf17224e980b 100644 (file)
@@ -35,6 +35,9 @@ extern void (*handle_arch_irq)(struct pt_regs *);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 #endif
 
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644 (file)
index 0000000..bca864a
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+        struct sdio_cis cis;
+        struct sdio_cccr cccr;
+        struct sdio_embedded_func *funcs;
+        int num_funcs;
+};
+
+struct mmc_platform_data {
+       unsigned int ocr_mask;                  /* available voltages */
+       int built_in;                           /* built-in device flag */
+       int card_present;                       /* card detect state */
+       u32 (*translate_vdd)(struct device *, unsigned int);
+       unsigned int (*status)(struct device *);
+       struct embedded_sdio_data *embedded_sdio;
+       int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
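
Boards with a soldered-down SDIO device (typically Wi-Fi) would hand this structure to their MMC host driver so the core can skip card detect and CIS parsing. A hypothetical board-file sketch; the names and IDs are made up:

    #include <asm/mach/mmc.h>

    static struct embedded_sdio_data my_wifi_sdio = {
            .cis = {
                    .vendor   = 0x1234,     /* made-up vendor ID */
                    .device   = 0x5678,     /* made-up device ID */
                    .max_dclk = 50000000,
            },
    };

    static struct mmc_platform_data my_mmc_data = {
            .ocr_mask      = MMC_VDD_32_33 | MMC_VDD_33_34,
            .built_in      = 1,             /* device is always present */
            .embedded_sdio = &my_wifi_sdio,
    };
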
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
new file mode 100644 (file)
index 0000000..8c8add8
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ *  arch/arm/include/asm/rodata.h
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_RODATA_H
+#define _ASMARM_RODATA_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_DEBUG_RODATA
+
+int set_memory_rw(unsigned long virt, int numpages);
+int set_memory_ro(unsigned long virt, int numpages);
+
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
+
+#endif
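
Both page-protection helpers take a virtual address and a count of whole pages. A hypothetical caller write-protecting a page-aligned table once initialization is done (a sketch, not part of this merge):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <asm/rodata.h>

    static u32 my_table[1024] __aligned(PAGE_SIZE);

    static int __init my_protect_table(void)
    {
            /* numpages counts whole pages starting at the given address */
            return set_memory_ro((unsigned long)my_table,
                                 DIV_ROUND_UP(sizeof(my_table), PAGE_SIZE));
    }
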
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 610ccf33f5e760aece49be89a1052f6f64a05aef..67a18a5ed9fa08e7cbdb7a49526862121e794edf 100644 (file)
@@ -82,6 +82,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
 extern int register_ipi_completion(struct completion *completion, int cpu);
+extern void smp_send_all_cpu_backtrace(void);
 
 struct smp_operations {
 #ifdef CONFIG_SMP
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 8ff0ecdc637fd3bd2b41c0d129715acad13152e2..7db3247b218793b11d24fef048c56e6ba2bb7af8 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 #include <linux/device.h>
 #include <linux/clk.h>
@@ -37,26 +38,37 @@ MODULE_AUTHOR("Alexander Shishkin");
 struct tracectx {
        unsigned int    etb_bufsz;
        void __iomem    *etb_regs;
-       void __iomem    *etm_regs;
+       void __iomem    **etm_regs;
+       int             etm_regs_count;
        unsigned long   flags;
        int             ncmppairs;
        int             etm_portsz;
+       int             etm_contextid_size;
+       u32             etb_fc;
+       unsigned long   range_start;
+       unsigned long   range_end;
+       unsigned long   data_range_start;
+       unsigned long   data_range_end;
+       bool            dump_initial_etb;
        struct device   *dev;
        struct clk      *emu_clk;
        struct mutex    mutex;
 };
 
-static struct tracectx tracer;
+static struct tracectx tracer = {
+       .range_start = (unsigned long)_stext,
+       .range_end = (unsigned long)_etext,
+};
 
 static inline bool trace_isrunning(struct tracectx *t)
 {
        return !!(t->flags & TRACER_RUNNING);
 }
 
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
                unsigned long start, unsigned long end, int exclude, int data)
 {
-       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
                    ETMAAT_NOVALCMP;
 
        if (n < 1 || n > t->ncmppairs)
@@ -72,95 +84,185 @@ static int etm_setup_address_range(struct tracectx *t, int n,
                flags |= ETMAAT_IEXEC;
 
        /* first comparator for the range */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
-       etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+       etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
 
        /* second comparator is right next to it */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-       etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
-       flags = exclude ? ETMTE_INCLEXCL : 0;
-       etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+       etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+       if (data) {
+               flags = exclude ? ETMVDC3_EXCLONLY : 0;
+               if (exclude)
+                       n += 8;
+               etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+       } else {
+               flags = exclude ? ETMTE_INCLEXCL : 0;
+               etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+       }
 
        return 0;
 }
 
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
 {
        u32 v;
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etb_unlock(t);
-
-       etb_writel(t, 0, ETBR_FORMATTERCTRL);
-       etb_writel(t, 1, ETBR_CTRL);
-
-       etb_lock(t);
-
-       /* configure etm */
        v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+       v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
 
        if (t->flags & TRACER_CYCLE_ACC)
                v |= ETMCTRL_CYCLEACCURATE;
 
-       etm_unlock(t);
+       if (t->flags & TRACER_BRANCHOUTPUT)
+               v |= ETMCTRL_BRANCH_OUTPUT;
+
+       if (t->flags & TRACER_TRACE_DATA)
+               v |= ETMCTRL_DATA_DO_ADDR;
+
+       if (t->flags & TRACER_TIMESTAMP)
+               v |= ETMCTRL_TIMESTAMP_EN;
+
+       if (t->flags & TRACER_RETURN_STACK)
+               v |= ETMCTRL_RETURN_STACK_EN;
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_unlock(t, id);
 
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       etm_writel(t, id, v, ETMR_CTRL);
+
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_setup_address_range(t, 1, (unsigned long)_stext,
-                       (unsigned long)_etext, 0, 0);
-       etm_writel(t, 0, ETMR_TRACEENCTRL2);
-       etm_writel(t, 0, ETMR_TRACESSCTRL);
-       etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+       if (t->range_start || t->range_end)
+               etm_setup_address_range(t, id, 1,
+                                       t->range_start, t->range_end, 0, 0);
+       else
+               etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+       etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+       etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+       etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+       if (t->data_range_start || t->data_range_end)
+               etm_setup_address_range(t, id, 2, t->data_range_start,
+                                       t->data_range_end, 0, 1);
+       else
+               etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+       etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
 
        v &= ~ETMCTRL_PROGRAM;
        v |= ETMCTRL_PORTSEL;
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_writel(t, id, v, ETMR_CTRL);
 
        timeout = TRACER_TIMEOUT;
-       while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+       while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+       int ret;
+       int id;
+       u32 etb_fc = t->etb_fc;
+
+       etb_unlock(t);
+
+       t->dump_initial_etb = false;
+       etb_writel(t, 0, ETBR_WRITEADDR);
+       etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+       etb_writel(t, 1, ETBR_CTRL);
+
+       etb_lock(t);
+
+       /* configure etm(s) */
+       for (id = 0; id < t->etm_regs_count; id++) {
+               ret = trace_start_etm(t, id);
+               if (ret)
+                       return ret;
+       }
 
        t->flags |= TRACER_RUNNING;
 
        return 0;
 }
 
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
 {
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etm_unlock(t);
+       etm_unlock(t, id);
 
-       etm_writel(t, 0x440, ETMR_CTRL);
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       etm_writel(t, id, 0x440, ETMR_CTRL);
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
-               dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               dev_err(t->dev,
+                       "etm%d: Waiting for progbit to assert timed out\n",
+                       id);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_power_down_etm(struct tracectx *t, int id)
+{
+       unsigned long timeout = TRACER_TIMEOUT;
+       etm_unlock(t, id);
+       while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
+               ;
+       if (!timeout) {
+               dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
+                       id);
+               etm_lock(t, id);
+               return -EFAULT;
+       }
+
+       etm_writel(t, id, 0x441, ETMR_CTRL);
+
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+       int id;
+       unsigned long timeout = TRACER_TIMEOUT;
+       u32 etb_fc = t->etb_fc;
+
+       for (id = 0; id < t->etm_regs_count; id++)
+               trace_stop_etm(t, id);
+
+       for (id = 0; id < t->etm_regs_count; id++)
+               trace_power_down_etm(t, id);
 
        etb_unlock(t);
-       etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+       if (etb_fc) {
+               etb_fc |= ETBFF_STOPFL;
+               etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
+       }
+       etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
 
        timeout = TRACER_TIMEOUT;
        while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -185,24 +287,15 @@ static int trace_stop(struct tracectx *t)
 static int etb_getdatalen(struct tracectx *t)
 {
        u32 v;
-       int rp, wp;
+       int wp;
 
        v = etb_readl(t, ETBR_STATUS);
 
        if (v & 1)
                return t->etb_bufsz;
 
-       rp = etb_readl(t, ETBR_READADDR);
        wp = etb_readl(t, ETBR_WRITEADDR);
-
-       if (rp > wp) {
-               etb_writel(t, 0, ETBR_READADDR);
-               etb_writel(t, 0, ETBR_WRITEADDR);
-
-               return 0;
-       }
-
-       return wp - rp;
+       return wp;
 }
 
 /* sysrq+v will always stop the running trace and leave it at that */
@@ -235,21 +328,18 @@ static void etm_dump(void)
                printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
        printk(KERN_INFO "\n--- ETB buffer end ---\n");
 
-       /* deassert the overflow bit */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_WRITEADDR);
-
        etb_lock(t);
 }
 
 static void sysrq_etm_dump(int key)
 {
+       if (!mutex_trylock(&tracer.mutex)) {
+               printk(KERN_INFO "Tracing hardware busy\n");
+               return;
+       }
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
+       mutex_unlock(&tracer.mutex);
 }
 
 static struct sysrq_key_op sysrq_etm_op = {
@@ -276,6 +366,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
        struct tracectx *t = file->private_data;
        u32 first = 0;
        u32 *buf;
+       int wpos;
+       int skip;
+       long wlength;
+       loff_t pos = *ppos;
 
        mutex_lock(&t->mutex);
 
@@ -287,31 +381,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
        etb_unlock(t);
 
        total = etb_getdatalen(t);
+       if (total == 0 && t->dump_initial_etb)
+               total = t->etb_bufsz;
        if (total == t->etb_bufsz)
                first = etb_readl(t, ETBR_WRITEADDR);
 
+       if (pos > total * 4) {
+               skip = 0;
+               wpos = total;
+       } else {
+               skip = (int)pos % 4;
+               wpos = (int)pos / 4;
+       }
+       total -= wpos;
+       first = (first + wpos) % t->etb_bufsz;
+
        etb_writel(t, first, ETBR_READADDR);
 
-       length = min(total * 4, (int)len);
-       buf = vmalloc(length);
+       wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+       length = min(total * 4 - skip, (int)len);
+       buf = vmalloc(wlength * 4);
 
-       dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+       dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+               length, pos, wlength, first);
+       dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
        dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-       for (i = 0; i < length / 4; i++)
+       for (i = 0; i < wlength; i++)
                buf[i] = etb_readl(t, ETBR_READMEM);
 
-       /* the only way to deassert overflow bit in ETB status is this */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_WRITEADDR);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
        etb_lock(t);
 
-       length -= copy_to_user(data, buf, length);
+       length -= copy_to_user(data, (u8 *)buf + skip, length);
        vfree(buf);
+       *ppos = pos + length;
 
 out:
        mutex_unlock(&t->mutex);
@@ -348,28 +450,17 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
        if (ret)
                goto out;
 
+       mutex_lock(&t->mutex);
        t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
        if (!t->etb_regs) {
                ret = -ENOMEM;
                goto out_release;
        }
 
+       t->dev = &dev->dev;
+       t->dump_initial_etb = true;
        amba_set_drvdata(dev, t);
 
-       etb_miscdev.parent = &dev->dev;
-
-       ret = misc_register(&etb_miscdev);
-       if (ret)
-               goto out_unmap;
-
-       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-       if (IS_ERR(t->emu_clk)) {
-               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-               return -EFAULT;
-       }
-
-       clk_enable(t->emu_clk);
-
        etb_unlock(t);
        t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
        dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -378,6 +469,20 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
        etb_writel(t, 0, ETBR_CTRL);
        etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
        etb_lock(t);
+       mutex_unlock(&t->mutex);
+
+       etb_miscdev.parent = &dev->dev;
+
+       ret = misc_register(&etb_miscdev);
+       if (ret)
+               goto out_unmap;
+
+       /* Get optional clock. Currently used to select clock source on omap3 */
+       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+       if (IS_ERR(t->emu_clk))
+               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+       else
+               clk_enable(t->emu_clk);
 
        dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
 
@@ -385,10 +490,13 @@ out:
        return ret;
 
 out_unmap:
+       mutex_lock(&t->mutex);
        amba_set_drvdata(dev, NULL);
        iounmap(t->etb_regs);
+       t->etb_regs = NULL;
 
 out_release:
+       mutex_unlock(&t->mutex);
        amba_release_regions(dev);
 
        return ret;
@@ -403,8 +511,10 @@ static int etb_remove(struct amba_device *dev)
        iounmap(t->etb_regs);
        t->etb_regs = NULL;
 
-       clk_disable(t->emu_clk);
-       clk_put(t->emu_clk);
+       if (!IS_ERR(t->emu_clk)) {
+               clk_disable(t->emu_clk);
+               clk_put(t->emu_clk);
+       }
 
        amba_release_regions(dev);
 
@@ -448,7 +558,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
                return -EINVAL;
 
        mutex_lock(&tracer.mutex);
-       ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+       if (!tracer.etb_regs)
+               ret = -ENODEV;
+       else
+               ret = value ? trace_start(&tracer) : trace_stop(&tracer);
        mutex_unlock(&tracer.mutex);
 
        return ret ? : n;
@@ -463,36 +576,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
 {
        u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
        int datalen;
+       int id;
+       int ret;
 
-       etb_unlock(&tracer);
-       datalen = etb_getdatalen(&tracer);
-       etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-       etb_ra = etb_readl(&tracer, ETBR_READADDR);
-       etb_st = etb_readl(&tracer, ETBR_STATUS);
-       etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-       etb_lock(&tracer);
-
-       etm_unlock(&tracer);
-       etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
-       etm_st = etm_readl(&tracer, ETMR_STATUS);
-       etm_lock(&tracer);
+       mutex_lock(&tracer.mutex);
+       if (tracer.etb_regs) {
+               etb_unlock(&tracer);
+               datalen = etb_getdatalen(&tracer);
+               etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+               etb_ra = etb_readl(&tracer, ETBR_READADDR);
+               etb_st = etb_readl(&tracer, ETBR_STATUS);
+               etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+               etb_lock(&tracer);
+       } else {
+               etb_wa = etb_ra = etb_st = etb_fc = ~0;
+               datalen = -1;
+       }
 
-       return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+       ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
                        "ETBR_WRITEADDR:\t%08x\n"
                        "ETBR_READADDR:\t%08x\n"
                        "ETBR_STATUS:\t%08x\n"
-                       "ETBR_FORMATTERCTRL:\t%08x\n"
-                       "ETMR_CTRL:\t%08x\n"
-                       "ETMR_STATUS:\t%08x\n",
+                       "ETBR_FORMATTERCTRL:\t%08x\n",
                        datalen,
                        tracer.ncmppairs,
                        etb_wa,
                        etb_ra,
                        etb_st,
-                       etb_fc,
+                       etb_fc
+                       );
+
+       for (id = 0; id < tracer.etm_regs_count; id++) {
+               etm_unlock(&tracer, id);
+               etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+               etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+               etm_lock(&tracer, id);
+               ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+                       "ETMR_STATUS:\t%08x\n",
                        etm_ctrl,
                        etm_st
                        );
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return ret;
 }
 
 static struct kobj_attribute trace_info_attr =
@@ -531,42 +658,260 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
        __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
+static ssize_t trace_contextid_size_show(struct kobject *kobj,
+                                        struct kobj_attribute *attr,
+                                        char *buf)
+{
+       /* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
+       return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
+}
+
+static ssize_t trace_contextid_size_store(struct kobject *kobj,
+                                         struct kobj_attribute *attr,
+                                         const char *buf, size_t n)
+{
+       unsigned int contextid_size;
+
+       if (sscanf(buf, "%u", &contextid_size) != 1)
+               return -EINVAL;
+
+       if (contextid_size == 3 || contextid_size > 4)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.etm_contextid_size = fls(contextid_size);
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_contextid_size_attr =
+       __ATTR(trace_contextid_size, 0644,
+               trace_contextid_size_show, trace_contextid_size_store);
+
+static ssize_t trace_branch_output_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr,
+                                       char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
+}
+
+static ssize_t trace_branch_output_store(struct kobject *kobj,
+                                        struct kobj_attribute *attr,
+                                        const char *buf, size_t n)
+{
+       unsigned int branch_output;
+
+       if (sscanf(buf, "%u", &branch_output) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (branch_output) {
+               tracer.flags |= TRACER_BRANCHOUTPUT;
+               /* Branch broadcasting is incompatible with the return stack */
+               tracer.flags &= ~TRACER_RETURN_STACK;
+       } else {
+               tracer.flags &= ~TRACER_BRANCHOUTPUT;
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_branch_output_attr =
+       __ATTR(trace_branch_output, 0644,
+               trace_branch_output_show, trace_branch_output_store);
+
+static ssize_t trace_return_stack_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
+}
+
+static ssize_t trace_return_stack_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned int return_stack;
+
+       if (sscanf(buf, "%u", &return_stack) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (return_stack) {
+               tracer.flags |= TRACER_RETURN_STACK;
+               /* Return stack is incompatible with branch broadcasting */
+               tracer.flags &= ~TRACER_BRANCHOUTPUT;
+       } else {
+               tracer.flags &= ~TRACER_RETURN_STACK;
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_return_stack_attr =
+       __ATTR(trace_return_stack, 0644,
+               trace_return_stack_show, trace_return_stack_store);
+
+static ssize_t trace_timestamp_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
+}
+
+static ssize_t trace_timestamp_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned int timestamp;
+
+       if (sscanf(buf, "%u", &timestamp) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (timestamp)
+               tracer.flags |= TRACER_TIMESTAMP;
+       else
+               tracer.flags &= ~TRACER_TIMESTAMP;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_timestamp_attr =
+       __ATTR(trace_timestamp, 0644,
+               trace_timestamp_show, trace_timestamp_store);
+
+static ssize_t trace_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%08lx %08lx\n",
+                       tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start, range_end;
+
+       if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.range_start = range_start;
+       tracer.range_end = range_end;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+
+static struct kobj_attribute trace_range_attr =
+       __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       unsigned long range_start;
+       u64 range_end;
+       mutex_lock(&tracer.mutex);
+       range_start = tracer.data_range_start;
+       range_end = tracer.data_range_end;
+       if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+               range_end = 0x100000000ULL;
+       mutex_unlock(&tracer.mutex);
+       return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start;
+       u64 range_end;
+
+       if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.data_range_start = range_start;
+       tracer.data_range_end = (unsigned long)range_end;
+       if (range_end)
+               tracer.flags |= TRACER_TRACE_DATA;
+       else
+               tracer.flags &= ~TRACER_TRACE_DATA;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+
+static struct kobj_attribute trace_data_range_attr =
+       __ATTR(trace_data_range, 0644,
+               trace_data_range_show, trace_data_range_store);
+
 static int etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct tracectx *t = &tracer;
        int ret = 0;
+       void __iomem **new_regs;
+       int new_count;
+       u32 etmccr;
+       u32 etmidr;
+       u32 etmccer = 0;
+       u8 etm_version = 0;
+
+       mutex_lock(&t->mutex);
+       new_count = t->etm_regs_count + 1;
+       new_regs = krealloc(t->etm_regs,
+                               sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
 
-       if (t->etm_regs) {
-               dev_dbg(&dev->dev, "ETM already initialized\n");
-               ret = -EBUSY;
+       if (!new_regs) {
+               dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+               ret = -ENOMEM;
                goto out;
        }
+       t->etm_regs = new_regs;
 
        ret = amba_request_regions(dev, NULL);
        if (ret)
                goto out;
 
-       t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-       if (!t->etm_regs) {
+       t->etm_regs[t->etm_regs_count] =
+               ioremap_nocache(dev->res.start, resource_size(&dev->res));
+       if (!t->etm_regs[t->etm_regs_count]) {
                ret = -ENOMEM;
                goto out_release;
        }
 
-       amba_set_drvdata(dev, t);
+       amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
 
-       mutex_init(&t->mutex);
-       t->dev = &dev->dev;
-       t->flags = TRACER_CYCLE_ACC;
+       t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
        t->etm_portsz = 1;
+       t->etm_contextid_size = 3;
 
-       etm_unlock(t);
-       (void)etm_readl(t, ETMMR_PDSR);
+       etm_unlock(t, t->etm_regs_count);
+       (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
        /* dummy first read */
-       (void)etm_readl(&tracer, ETMMR_OSSRR);
-
-       t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
-       etm_writel(t, 0x440, ETMR_CTRL);
-       etm_lock(t);
+       (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
+
+       etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
+       t->ncmppairs = etmccr & 0xf;
+       if (etmccr & ETMCCR_ETMIDR_PRESENT) {
+               etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
+               etm_version = ETMIDR_VERSION(etmidr);
+               if (etm_version >= ETMIDR_VERSION_3_1)
+                       etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
+       }
+       etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+       etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+       etm_lock(t, t->etm_regs_count);
 
        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_running_attr.attr);
@@ -582,36 +927,101 @@ static int etm_probe(struct amba_device *dev, const struct amba_id *id)
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
 
-       dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+       ret = sysfs_create_file(&dev->dev.kobj,
+                               &trace_contextid_size_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev,
+                       "Failed to create trace_contextid_size in sysfs\n");
+
+       ret = sysfs_create_file(&dev->dev.kobj,
+                               &trace_branch_output_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev,
+                       "Failed to create trace_branch_output in sysfs\n");
+
+       if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_return_stack_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                             "Failed to create trace_return_stack in sysfs\n");
+       }
+
+       if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_timestamp_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                               "Failed to create trace_timestamp in sysfs\n");
+       }
+
+       ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+       if (etm_version < ETMIDR_VERSION_PFT_1_0) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_data_range_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                               "Failed to create trace_data_range in sysfs\n");
+       } else {
+               tracer.flags &= ~TRACER_TRACE_DATA;
+       }
+
+       dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+       /* Enable formatter if there are multiple trace sources */
+       if (new_count > 1)
+               t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+       t->etm_regs_count = new_count;
 
 out:
+       mutex_unlock(&t->mutex);
        return ret;
 
 out_unmap:
        amba_set_drvdata(dev, NULL);
-       iounmap(t->etm_regs);
+       iounmap(t->etm_regs[t->etm_regs_count]);
 
 out_release:
        amba_release_regions(dev);
 
+       mutex_unlock(&t->mutex);
        return ret;
 }
 
 static int etm_remove(struct amba_device *dev)
 {
-       struct tracectx *t = amba_get_drvdata(dev);
+       int i;
+       struct tracectx *t = &tracer;
+       void __iomem    *etm_regs = amba_get_drvdata(dev);
+
+       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
 
        amba_set_drvdata(dev, NULL);
 
-       iounmap(t->etm_regs);
-       t->etm_regs = NULL;
+       mutex_lock(&t->mutex);
+       for (i = 0; i < t->etm_regs_count; i++)
+               if (t->etm_regs[i] == etm_regs)
+                       break;
+       for (; i < t->etm_regs_count - 1; i++)
+               t->etm_regs[i] = t->etm_regs[i + 1];
+       t->etm_regs_count--;
+       if (!t->etm_regs_count) {
+               kfree(t->etm_regs);
+               t->etm_regs = NULL;
+       }
+       mutex_unlock(&t->mutex);
 
+       iounmap(etm_regs);
        amba_release_regions(dev);
 
-       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
        return 0;
 }
 
@@ -620,6 +1030,10 @@ static struct amba_id etm_ids[] = {
                .id     = 0x0003b921,
                .mask   = 0x0007ffff,
        },
+       {
+               .id     = 0x0003b950,
+               .mask   = 0x0007ffff,
+       },
        { 0, 0 },
 };
 
@@ -637,6 +1051,8 @@ static int __init etm_init(void)
 {
        int retval;
 
+       mutex_init(&tracer.mutex);
+
        retval = amba_driver_register(&etb_driver);
        if (retval) {
                printk(KERN_ERR "Failed to register etb\n");
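
Taken together, the new sysfs attributes select what gets captured (trace_range, trace_data_range, trace_contextid_size, trace_branch_output, trace_return_stack, trace_timestamp) and trace_running starts or stops the trace; the raw buffer is then read back through the ETB misc device. A rough user-space sketch of the flow; the sysfs paths below are assumptions, since the real ones depend on the AMBA device name:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static void put_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd >= 0) {
                    write(fd, val, strlen(val));
                    close(fd);
            }
    }

    int main(void)
    {
            /* Hypothetical paths: the attributes hang off the ETM's AMBA device. */
            put_str("/sys/devices/etm/trace_range", "c0008000 c0800000");
            put_str("/sys/devices/etm/trace_timestamp", "1");
            put_str("/sys/devices/etm/trace_running", "1");
            /* ... run the workload, stop tracing, then read out the ETB ... */
            put_str("/sys/devices/etm/trace_running", "0");
            return 0;
    }
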
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 34e56647dceeee88d99f65d5fd0a6e00fb46a0fd..6a740a93f4bb3c293db3b9ea838101e0ec880474 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -63,6 +64,20 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_kernel_text_rw();
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       set_kernel_text_ro();
+       return 0;
+}
+
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
        return arm_gen_branch_link(pc, addr);
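
These two hooks bracket every text modification the ftrace core performs, pairing with the set_kernel_text_rw()/set_kernel_text_ro() helpers from the new asm/rodata.h. The expected calling pattern, sketched below (my_patch_text is a hypothetical stand-in for the core's code-patching path):

    static int my_patch_text(void)
    {
            int ret;

            ret = ftrace_arch_code_modify_prepare();        /* text now RW */
            if (ret)
                    return ret;
            /* ... rewrite mcount call sites here ... */
            return ftrace_arch_code_modify_post_process();  /* back to RO */
    }
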
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff57304227ce67665e749f39b05fc7..b321c8fbb87dc8d68b60712c6f74c5d306ca8777 100644 (file)
@@ -144,6 +144,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+       if (user_mode(regs))
+               return -1;
        kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
        return 0;
@@ -151,6 +153,8 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+       if (user_mode(regs))
+               return -1;
        compiled_break = 1;
        kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ac4c2e5e17e4ca5e0e0af30a8d5c70506394ceaa..978002e5b406224ea52c5f80980d32ba5d010d3a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cpuidle.h>
 #include <linux/leds.h>
+#include <linux/console.h>
 
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
@@ -57,9 +58,46 @@ static const char *isa_modes[] = {
   "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 };
 
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+       smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+       dump_stack();
+}
+#endif
+
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+       printk("\n");
+       pr_emerg("Restarting %s\n", linux_banner);
+       if (console_trylock()) {
+               console_unlock();
+               return;
+       }
+
+       mdelay(50);
+
+       local_irq_disable();
+       if (!console_trylock())
+               pr_emerg("arm_restart: Console was locked! Busting\n");
+       else
+               pr_emerg("arm_restart: Console was locked!\n");
+       console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * A temporary stack to use for CPU reset. This is static so that we
  * don't clobber it with the identity mapping. When running with this
@@ -147,6 +185,7 @@ void arch_cpu_idle_prepare(void)
 
 void arch_cpu_idle_enter(void)
 {
+       idle_notifier_call_chain(IDLE_START);
        ledtrig_cpu(CPU_LED_IDLE_START);
 #ifdef CONFIG_PL310_ERRATA_769419
        wmb();
@@ -156,6 +195,7 @@ void arch_cpu_idle_enter(void)
 void arch_cpu_idle_exit(void)
 {
        ledtrig_cpu(CPU_LED_IDLE_END);
+       idle_notifier_call_chain(IDLE_END);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -195,6 +235,16 @@ __setup("reboot=", reboot_setup);
  */
 void machine_shutdown(void)
 {
+#ifdef CONFIG_SMP
+       /*
+        * Disable preemption so we're guaranteed to
+        * run to power off or reboot and prevent
+        * the possibility of switching to another
+        * thread that might wind up blocking on
+        * one of the stopped CPUs.
+        */
+       preempt_disable();
+#endif
        disable_nonboot_cpus();
 }
 
@@ -240,6 +290,10 @@ void machine_restart(char *cmd)
 {
        smp_send_stop();
 
+       /* Flush the console to make sure all the relevant messages make it
+        * out to the console drivers */
+       arm_machine_flush_console();
+
        arm_pm_restart(reboot_mode, cmd);
 
        /* Give a grace period for failure to restart of 1s */
@@ -251,6 +305,77 @@ void machine_restart(char *cmd)
        while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+       show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+       show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+       show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+       show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+       show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+       show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+       show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+       show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+       show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+       show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+       show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+       show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+       show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+       show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+       set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        unsigned long flags;
@@ -307,6 +432,8 @@ void __show_regs(struct pt_regs *regs)
                printk("Control: %08x%s\n", ctrl, buf);
        }
 #endif
+
+       show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
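
The idle_notifier_call_chain() calls bracket each idle period, mirroring the x86 idle-notifier API (note the arch/x86/include/asm/idle.h change elsewhere in this merge). A hypothetical subscriber; the header placement and the registration call are assumed from that API, not shown in this diff:

    #include <linux/cpu.h>          /* IDLE_START/IDLE_END, assumed home */
    #include <linux/init.h>
    #include <linux/notifier.h>

    static int my_idle_notify(struct notifier_block *nb,
                              unsigned long val, void *data)
    {
            if (val == IDLE_START) {
                    /* the CPU is entering idle */
            } else if (val == IDLE_END) {
                    /* the CPU has left idle */
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    static int __init my_idle_init(void)
    {
            idle_notifier_register(&my_idle_nb);    /* assumed API */
            return 0;
    }
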
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index dc2843f337af9dae837ae1ec140eca77463f9489..ed3243bb6c07436545912fb40c94c146415b8730 100644 (file)
@@ -70,6 +70,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
        IPI_COMPLETION,
+       IPI_CPU_BACKTRACE,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -468,6 +469,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_COMPLETION, "completion interrupts"),
+       S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -606,6 +608,58 @@ static void ipi_complete(unsigned int cpu)
        complete(per_cpu(cpu_completion, cpu));
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       int i;
+
+       if (test_and_set_bit(0, &backtrace_flag))
+               /*
+                * If there is already a trigger_all_cpu_backtrace() in progress
+                * (backtrace_flag == 1), don't output double cpu dump infos.
+                */
+               return;
+
+       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpu_clear(this_cpu, backtrace_mask);
+
+       pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+       dump_stack();
+
+       pr_info("\nsending IPI to all other CPUs:\n");
+       smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+       /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpumask_empty(&backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+
+       clear_bit(0, &backtrace_flag);
+       smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+       if (cpu_isset(cpu, backtrace_mask)) {
+               raw_spin_lock(&backtrace_lock);
+               pr_warning("IPI backtrace for cpu %d\n", cpu);
+               show_regs(regs);
+               raw_spin_unlock(&backtrace_lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -663,6 +717,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+       case IPI_CPU_BACKTRACE:
+               ipi_cpu_backtrace(cpu, regs);
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
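
smp_send_all_cpu_backtrace() dumps the calling CPU first, IPIs the rest, serializes their register dumps with backtrace_lock, and waits up to 10 seconds for stragglers; the single in-progress bit keeps concurrent triggers from interleaving output. Any debug path can reach it through the arch_trigger_all_cpu_backtrace() wrapper declared in asm/irq.h above, e.g. a hypothetical lockup hook:

    #include <linux/printk.h>
    #include <asm/irq.h>

    static void my_lockup_dump(void)
    {
            pr_err("lockup suspected, dumping all CPUs\n");
            arch_trigger_all_cpu_backtrace();
    }
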
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be96f635b5945b978bed2c6978fd3d2b61ba..8045a48c8476ed8c0ad8e0601561bb3d03d2c5a1 100644 (file)
@@ -7,6 +7,7 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
 
 obj-$(CONFIG_MMU)              += fault-armv.o flush.o idmap.o ioremap.o \
                                   mmap.o pgd.o mmu.o
+obj-$(CONFIG_DEBUG_RODATA)     += rodata.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y                          += nommu.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c465faca51b06b05ed50c27962b8b169198ea909..90a130f98acfd1b527d096b034b1632fcf93bd74 100644 (file)
@@ -33,6 +33,9 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;      /* Bitmask of active ways */
 static u32 l2x0_size;
+static u32 l2x0_cache_id;
+static unsigned int l2x0_sets;
+static unsigned int l2x0_ways;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 /* Aurora don't have the cache ID register available, so we have to
@@ -49,6 +52,13 @@ struct l2x0_of_data {
 
 static bool of_init = false;
 
+static inline bool is_pl310_rev(int rev)
+{
+       return (l2x0_cache_id &
+               (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
+                       (L2X0_CACHE_ID_PART_L310 | rev);
+}
+
 static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
        /* wait for cache operation by line or way to complete */
@@ -137,6 +147,23 @@ static void l2x0_cache_sync(void)
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+#ifdef CONFIG_PL310_ERRATA_727915
+static void l2x0_for_each_set_way(void __iomem *reg)
+{
+       int set;
+       int way;
+       unsigned long flags;
+
+       for (way = 0; way < l2x0_ways; way++) {
+               raw_spin_lock_irqsave(&l2x0_lock, flags);
+               for (set = 0; set < l2x0_sets; set++)
+                       writel_relaxed((way << 28) | (set << 5), reg);
+               cache_sync();
+               raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+       }
+}
+#endif
+
 static void __l2x0_flush_all(void)
 {
        debug_writel(0x03);
@@ -150,6 +177,13 @@ static void l2x0_flush_all(void)
 {
        unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+       if (is_pl310_rev(REV_PL310_R2P0)) {
+               l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
+               return;
+       }
+#endif
+
        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
@@ -160,11 +194,20 @@ static void l2x0_clean_all(void)
 {
        unsigned long flags;
 
+#ifdef CONFIG_PL310_ERRATA_727915
+       if (is_pl310_rev(REV_PL310_R2P0)) {
+               l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
+               return;
+       }
+#endif
+
        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
+       debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
+       debug_writel(0x00);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -323,65 +366,64 @@ static void l2x0_unlock(u32 cache_id)
 void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 {
        u32 aux;
-       u32 cache_id;
        u32 way_size = 0;
-       int ways;
        int way_size_shift = L2X0_WAY_SIZE_SHIFT;
        const char *type;
 
        l2x0_base = base;
        if (cache_id_part_number_from_dt)
-               cache_id = cache_id_part_number_from_dt;
+               l2x0_cache_id = cache_id_part_number_from_dt;
        else
-               cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+               l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
        aux &= aux_mask;
        aux |= aux_val;
 
        /* Determine the number of ways */
-       switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+       switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
-                       ways = 16;
+                       l2x0_ways = 16;
                else
-                       ways = 8;
+                       l2x0_ways = 8;
                type = "L310";
 #ifdef CONFIG_PL310_ERRATA_753970
                /* Unmapped register. */
                sync_reg_offset = L2X0_DUMMY_REG;
 #endif
-               if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
+               if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
                        outer_cache.set_debug = pl310_set_debug;
                break;
        case L2X0_CACHE_ID_PART_L210:
-               ways = (aux >> 13) & 0xf;
+               l2x0_ways = (aux >> 13) & 0xf;
                type = "L210";
                break;
 
        case AURORA_CACHE_ID:
                sync_reg_offset = AURORA_SYNC_REG;
-               ways = (aux >> 13) & 0xf;
-               ways = 2 << ((ways + 1) >> 2);
+               l2x0_ways = (aux >> 13) & 0xf;
+               l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);
                way_size_shift = AURORA_WAY_SIZE_SHIFT;
                type = "Aurora";
                break;
        default:
                /* Assume unknown chips have 8 ways */
-               ways = 8;
+               l2x0_ways = 8;
                type = "L2x0 series";
                break;
        }
 
-       l2x0_way_mask = (1 << ways) - 1;
+       l2x0_way_mask = (1 << l2x0_ways) - 1;
 
        /*
         * L2 cache Size =  Way size * Number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
-       way_size = 1 << (way_size + way_size_shift);
+       way_size = SZ_1K << (way_size + way_size_shift);
 
-       l2x0_size = ways * way_size * SZ_1K;
+       l2x0_size = l2x0_ways * way_size;
+       l2x0_sets = way_size / CACHE_LINE_SIZE;
 
        /*
         * Check if l2x0 controller is already enabled.
@@ -390,7 +432,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* Make sure that I&D is not locked down when starting */
-               l2x0_unlock(cache_id);
+               l2x0_unlock(l2x0_cache_id);
 
                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -419,7 +461,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 
        printk(KERN_INFO "%s cache controller enabled\n", type);
        printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-                       ways, cache_id, aux, l2x0_size);
+                       l2x0_ways, l2x0_cache_id, aux, l2x0_size);
 }
 
 #ifdef CONFIG_OF
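
For the erratum 727915 path above, l2x0_sets is derived from the per-way size, and the clean-by-index writes place the way number in bits [31:28] with the set index starting at bit 5. A worked example of the encoding for a hypothetical 512 KiB, 8-way PL310 with 32-byte lines (user-space arithmetic only):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int ways = 8;
            const unsigned int way_size = 512 * 1024 / ways; /* 64 KiB per way */
            const unsigned int sets = way_size / 32;         /* 2048 sets */
            unsigned int way = ways - 1, set = sets - 1;

            /* Same per-line encoding as l2x0_for_each_set_way() writes: */
            printf("%08x\n", (way << 28) | (set << 5));      /* prints 7000ffe0 */
            return 0;
    }
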
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4bd3d45ecdc66ad2c74885795df9681ea7..7a3d3d8d98d7fa384e998d271b1ccbbf5b3de256 100644 (file)
@@ -270,6 +270,11 @@ v6_dma_clean_range:
  *     - end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       sub     r2, r1, r0
+       cmp     r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       bhi     v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                @ read for ownership
        strb    r2, [r0]                @ write for ownership
@@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+       mov     r0, #0
+#ifdef HARVARD_CACHE
+       mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
+#else
+       mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
+#endif
+       mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
+       mov     pc, lr
+#endif
+
 /*
  *     dma_map_area(start, size, dir)
  *     - start - kernel virtual start address
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index e207aa5f846f602ec0fc843df89c8a81a5994e7b..56059a5be9a89adaa588d7275beb3362f7d2cc03 100644 (file)
@@ -276,10 +276,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                local_irq_enable();
 
        /*
-        * If we're in an interrupt or have no user
+        * If we're in an interrupt, or have no irqs, or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (in_atomic() || irqs_disabled() || !mm)
                goto no_context;
 
        /*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fb3c446af9e5ea4ddd98c85bfbd36db135092f2f..95f7a1c903262e6cacea7951e7371a57c13cd301 100644 (file)
@@ -605,11 +605,25 @@ static void __init *early_alloc(unsigned long sz)
        return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
+               return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+       return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+       __pmd_populate(pmd, __pa(pte), prot);
+       BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+       unsigned long addr, unsigned long prot)
 {
        if (pmd_none(*pmd)) {
-               pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-               __pmd_populate(pmd, __pa(pte), prot);
+               pte_t *pte = early_pte_alloc(pmd);
+               early_pte_install(pmd, pte, prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
@@ -619,11 +633,17 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
 {
-       pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+       pte_t *start_pte = early_pte_alloc(pmd);
+       pte_t *pte = start_pte + pte_index(addr);
+
+       /* If replacing a section mapping, the whole section must be replaced */
+       BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
@@ -655,7 +675,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                                      unsigned long end, phys_addr_t phys,
-                                     const struct mem_type *type)
+                                     const struct mem_type *type,
+                                     bool force_pages)
 {
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;
@@ -672,7 +693,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                 * aligned to a section boundary.
                 */
                if (type->prot_sect &&
-                               ((addr | next | phys) & ~SECTION_MASK) == 0) {
+                               ((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                               !force_pages) {
                        __map_init_section(pmd, addr, next, phys, type);
                } else {
                        alloc_init_pte(pmd, addr, next,
@@ -685,14 +707,15 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-       unsigned long end, unsigned long phys, const struct mem_type *type)
+       unsigned long end, unsigned long phys, const struct mem_type *type,
+       bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_pmd(pud, addr, next, phys, type);
+               alloc_init_pmd(pud, addr, next, phys, type, force_pages);
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
@@ -766,7 +789,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
 {
        unsigned long addr, length, end;
        phys_addr_t phys;
@@ -816,7 +839,7 @@ static void __init create_mapping(struct map_desc *md)
        do {
                unsigned long next = pgd_addr_end(addr, end);
 
-               alloc_init_pud(pgd, addr, next, phys, type);
+               alloc_init_pud(pgd, addr, next, phys, type, force_pages);
 
                phys += next - addr;
                addr = next;
@@ -838,7 +861,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
        for (md = io_desc; nr; md++, nr--) {
-               create_mapping(md);
+               create_mapping(md, false);
 
                vm = &svm->vm;
                vm->addr = (void *)(md->virtual & PAGE_MASK);
@@ -959,7 +982,7 @@ void __init debug_ll_io_init(void)
        map.virtual &= PAGE_MASK;
        map.length = PAGE_SIZE;
        map.type = MT_DEVICE;
-       create_mapping(&map);
+       create_mapping(&map, false);
 }
 #endif
 
@@ -1004,6 +1027,28 @@ void __init sanity_check_meminfo(void)
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
 
+#ifdef CONFIG_SPARSEMEM
+               if (pfn_to_section_nr(bank_pfn_start(bank)) !=
+                   pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
+                       phys_addr_t sz;
+                       unsigned long start_pfn = bank_pfn_start(bank);
+                       unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+                       sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
+
+                       if (meminfo.nr_banks >= NR_BANKS) {
+                               pr_crit("NR_BANKS too low, ignoring %llu bytes of memory\n",
+                                       (unsigned long long)(bank->size - sz));
+                       } else {
+                               memmove(bank + 1, bank,
+                                       (meminfo.nr_banks - i) * sizeof(*bank));
+                               meminfo.nr_banks++;
+                               bank[1].size -= sz;
+                               bank[1].start = __pfn_to_phys(end_pfn);
+                       }
+                       bank->size = sz;
+               }
+#endif
+
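
The SPARSEMEM block above splits any memory bank that straddles a sparsemem section boundary, carving off the head of the bank up to the next section start and re-queueing the remainder (which later iterations may split again). A runnable sketch of the split arithmetic (section size and bank layout hypothetical):

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define SECTION_SHIFT 28                /* hypothetical 256 MiB sections */
    #define SECTION_PFNS  (1UL << (SECTION_SHIFT - PAGE_SHIFT))

    int main(void)
    {
            unsigned long start_pfn = 0x2F000;   /* bank starts mid-section */
            unsigned long bank_pages = 0x22000;  /* and crosses a boundary */
            /* SECTION_ALIGN_UP(start_pfn + 1): first pfn of next section */
            unsigned long end_pfn =
                    (start_pfn + SECTION_PFNS) & ~(SECTION_PFNS - 1);
            unsigned long long sz =
                    (unsigned long long)(end_pfn - start_pfn) << PAGE_SHIFT;

            printf("first bank:  pfn %#lx, %llu bytes\n", start_pfn, sz);
            printf("second bank: pfn %#lx, %llu bytes\n", end_pfn,
                   (unsigned long long)(start_pfn + bank_pages - end_pfn)
                           << PAGE_SHIFT);
            return 0;
    }
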
                if (bank->start > ULONG_MAX)
                        highmem = 1;
 
@@ -1201,7 +1246,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1212,14 +1257,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1235,13 +1280,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #else
        map.type = MT_LOW_VECTORS;
 #endif
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        if (!vectors_high()) {
                map.virtual = 0;
                map.length = PAGE_SIZE * 2;
                map.type = MT_LOW_VECTORS;
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
 
        /* Now create a kernel read-only mapping */
@@ -1249,7 +1294,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = 0xffff0000 + PAGE_SIZE;
        map.length = PAGE_SIZE;
        map.type = MT_LOW_VECTORS;
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        /*
         * Ask the machine support to map in the statically mapped devices.
@@ -1274,20 +1319,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-       pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+       pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
                PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+       phys_addr_t start;
+       phys_addr_t end;
+       struct map_desc map;
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
-               phys_addr_t start = reg->base;
-               phys_addr_t end = start + reg->size;
-               struct map_desc map;
+               start = reg->base;
+               end = start + reg->size;
 
                if (end > arm_lowmem_limit)
                        end = arm_lowmem_limit;
@@ -1299,8 +1347,20 @@ static void __init map_lowmem(void)
                map.length = end - start;
                map.type = MT_MEMORY;
 
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
+
+#ifdef CONFIG_DEBUG_RODATA
+       start = __pa(_stext) & PMD_MASK;
+       end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+       map.pfn = __phys_to_pfn(start);
+       map.virtual = __phys_to_virt(start);
+       map.length = end - start;
+       map.type = MT_MEMORY;
+
+       create_mapping(&map, true);
+#endif
 }
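
With CONFIG_DEBUG_RODATA the kernel text through __end_rodata is remapped with force_pages=true, so the span is backed by individual PTEs rather than section entries, and mark_rodata_ro() (in the new arch/arm/mm/rodata.c below) can later flip per-page permissions. The span is first widened to PMD granularity; a runnable sketch of that rounding (the addresses and the 2 MiB pmd size are hypothetical):

    #include <stdio.h>

    #define PMD_SHIFT 21                    /* hypothetical 2 MiB pmds */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
            unsigned long pa_stext = 0x80108000;      /* __pa(_stext) */
            unsigned long pa_end_ro = 0x8063c000;     /* __pa(__end_rodata) */
            unsigned long start = pa_stext & PMD_MASK;
            unsigned long end = (pa_end_ro + PMD_SIZE - 1) & PMD_MASK;

            printf("page-mapped span: %#lx - %#lx (%lu MiB)\n",
                   start, end, (end - start) >> 20);
            return 0;
    }
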
 
 /*
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
new file mode 100644 (file)
index 0000000..9a8eb84
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ *  linux/arch/arm/mm/rodata.c
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ *  Based on x86 implementation in arch/x86/mm/init_32.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/rodata.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+static int kernel_set_to_readonly __read_mostly;
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+static const int rodata_test_data = 0xC3;
+
+static noinline void rodata_test(void)
+{
+       int result;
+
+       pr_info("%s: attempting to write to read-only section:\n", __func__);
+
+       if (*(volatile int *)&rodata_test_data != 0xC3) {
+               pr_err("read only data changed before test\n");
+               return;
+       }
+
+       /*
+        * Attempt to write to rodata_test_data, trapping the expected
+        * data abort.  If the trap executed, result will be 1.  If it didn't,
+        * result will be 0xFF.
+        */
+       asm volatile(
+               "0:     str     %[zero], [%[rodata_test_data]]\n"
+               "       mov     %[result], #0xFF\n"
+               "       b       2f\n"
+               "1:     mov     %[result], #1\n"
+               "2:\n"
+
+               /* Exception fixup - if store at label 0 faults, jumps to 1 */
+               ".pushsection __ex_table, \"a\"\n"
+               "       .long   0b, 1b\n"
+               ".popsection\n"
+
+               : [result] "=r" (result)
+               : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
+               : "memory"
+       );
+
+       if (result == 1)
+               pr_info("write to read-only section trapped, success\n");
+       else
+               pr_err("write to read-only section NOT trapped, test failed\n");
+
+       if (*(volatile int *)&rodata_test_data != 0xC3)
+               pr_err("read only data changed during write\n");
+}
+#else
+static inline void rodata_test(void) { }
+#endif
+
+static int set_page_attributes(unsigned long virt, int numpages,
+       pte_t (*f)(pte_t))
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long start = virt;
+       unsigned long end = virt + (numpages << PAGE_SHIFT);
+       unsigned long pmd_end;
+
+       while (virt < end) {
+               pmd = pmd_off_k(virt);
+               pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
+
+               if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
+                       pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
+                               __func__, pmd, pmd_val(*pmd), virt);
+                       virt = pmd_end;
+                       continue;
+               }
+
+               while (virt < pmd_end) {
+                       pte = pte_offset_kernel(pmd, virt);
+                       set_pte_ext(pte, f(*pte), 0);
+                       virt += PAGE_SIZE;
+               }
+       }
+
+       flush_tlb_kernel_range(start, end);
+
+       return 0;
+}
+
+int set_memory_ro(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_wrprotect);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_mkwrite);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+void set_kernel_text_rw(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx to read-write\n",
+                start, start + size);
+
+       set_memory_rw(start, size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_info_once("Write protecting the kernel text section %lx - %lx\n",
+               start, start + size);
+
+       pr_debug("Set kernel text: %lx - %lx to read only\n",
+                start, start + size);
+
+       set_memory_ro(start, size >> PAGE_SHIFT);
+}
+
+void mark_rodata_ro(void)
+{
+       kernel_set_to_readonly = 1;
+
+       set_kernel_text_ro();
+
+       rodata_test();
+}
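
set_memory_ro()/set_memory_rw() only walk ranges backed by page tables; section-mapped PMDs are skipped with an error, which is why map_lowmem() above forces the text/rodata span to page mappings. A minimal sketch of the intended use, as a code-patching path (ftrace, for instance) might bracket a write to the kernel text:

    /* assumes mark_rodata_ro() has already run */
    set_kernel_text_rw();                   /* drop write protection */
    /* ... patch an instruction inside the kernel text ... */
    set_kernel_text_ro();                   /* restore it */
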
index 3a8bfb12f7262b3fb1bffda3ab941cf2190f1007..9af67474016cee460a11f4fc7be87dc67b16b3be 100644 (file)
@@ -365,6 +365,23 @@ config CMDLINE
          entering them here. As a minimum, you should specify the
          root device (e.g. root=/dev/nfs).
 
+choice
+       prompt "Kernel command line type" if CMDLINE != ""
+       default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+       bool "Use bootloader kernel arguments if available"
+       help
+         Uses the command-line options passed by the boot loader. If
+         the boot loader doesn't provide any, the default kernel command
+         string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+       bool "Extend bootloader kernel arguments"
+       help
+         The command-line arguments provided by the boot loader will be
+         appended to the default kernel command string.
+
 config CMDLINE_FORCE
        bool "Always use the default kernel command string"
        help
@@ -372,6 +389,22 @@ config CMDLINE_FORCE
          loader passes other arguments to the kernel.
          This is useful if you cannot or don't want to change the
          command-line options your boot loader passes to the kernel.
+endchoice
+
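
A worked example of the three modes (values hypothetical): with CONFIG_CMDLINE="console=ttyAMA0" built in and a boot loader passing "root=/dev/sda2", CMDLINE_FROM_BOOTLOADER boots with "root=/dev/sda2" (the built-in string is used only when the boot loader passes nothing), CMDLINE_EXTEND boots with "console=ttyAMA0 root=/dev/sda2", and CMDLINE_FORCE boots with "console=ttyAMA0" alone, ignoring the boot loader.
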
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+       bool "Build a concatenated Image.gz/dtb by default"
+       depends on OF
+       help
+         Enabling this option will cause a concatenated Image.gz and list of
+         DTBs to be built by default (instead of a standalone Image.gz).
+         The image will be built in arch/arm64/boot/Image.gz-dtb.
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+       string "Default dtb names"
+       depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+       help
+         Space-separated list of names of dtbs to append when
+         building a concatenated Image.gz-dtb.
 
 config EFI
        bool "UEFI runtime support"
index 8f63c8a21b7edd7f8ffcab513c8a230d8859a5c1..28750a191dd86716da13e26ea3271cc7b3e61b9e 100644 (file)
@@ -48,7 +48,12 @@ libs-y               := arch/arm64/lib/ $(libs-y)
 libs-y         += $(LIBGCC)
 
 # Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE   := Image.gz-dtb
+else
 KBUILD_IMAGE   := Image.gz
+endif
+
 KBUILD_DTBS    := dtbs
 
 all:   $(KBUILD_IMAGE) $(KBUILD_DTBS)
@@ -67,6 +72,9 @@ zinstall install: vmlinux
 dtbs: scripts
        $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
 
+Image.gz-dtb: vmlinux scripts dtbs
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
index 8dab0bb6ae667c5f89da8aca74a0a3bb61dcfaa9..eb3551131b1e7c04930808c2b4834c7fc6f9817e 100644 (file)
@@ -1,2 +1,3 @@
 Image
 Image.gz
+Image.gz-dtb
index 5a0e3ab854a574fbf141b98fec7f0447daa64762..df519849fa00fdf246c5fa1a686accfc9cd40e2f 100644 (file)
 # Based on the ia64 boot/Makefile.
 #
 
+include $(srctree)/arch/arm64/boot/dts/Makefile
+
 targets := Image Image.gz
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 $(obj)/Image: vmlinux FORCE
        $(call if_changed,objcopy)
 
 $(obj)/Image.gz: $(obj)/Image FORCE
        $(call if_changed,gzip)
 
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+       $(call if_changed,cat)
+
 install: $(obj)/Image
        $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
        $(obj)/Image System.map "$(INSTALL_PATH)"
index ef388176116d2c7ea3cdc3f66fd6d63001f3f278..661015fd7748bf0339e78bff906ec7b095ff3679 100644 (file)
@@ -4,8 +4,15 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb
 dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
 
 targets += dtbs
-targets += $(dtb-y)
 
-dtbs: $(addprefix $(obj)/, $(dtb-y))
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+targets += $(DTB_LIST)
 
-clean-files := *.dtb
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+
+clean-files := dts/*.dtb *.dtb
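
A worked example of the name resolution (values hypothetical): CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="juno apm-mustang" has its quotes stripped by the $(subst) and yields DTB_LIST = juno.dtb apm-mustang.dtb; when the option is empty, DTB_LIST falls back to $(dtb-y), i.e. whatever the dtb-$(CONFIG_...) lines above selected. The Image.gz-dtb rule in boot/Makefile then concatenates Image.gz with each dtb in that order, for boot loaders that expect the device tree appended to the kernel image.
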
index a84d4c8acbbe65847621fb4f0ee6f40705fee177..ddb9d7830558f0840bf7b71fac33a20388c4a1f6 100644 (file)
@@ -168,8 +168,8 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 ({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
-       __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
-               sizeof(*(ptr))); \
+               __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
+                            sizeof(*(ptr))); \
        __ret; \
 })
 
@@ -177,8 +177,8 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 ({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
-       __cmpxchg((ptr), (unsigned long)(o), \
-               (unsigned long)(n), sizeof(*(ptr))); \
+               __cmpxchg((ptr), (unsigned long)(o), \
+                         (unsigned long)(n), sizeof(*(ptr))); \
        __ret; \
 })
 
index c5d1785373ed38c25f1096467d103cd6440c1b66..02bab09707f28cc7dd2a10bc1a3b9af35f8e1aef 100644 (file)
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
index 59b90379cb6adc1092c5d7a2056afab53e80efc6..48f4399534361d9e75d95493b75e9e54b87b117b 100644 (file)
@@ -40,19 +40,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 struct kmem_cache *task_xstate_cachep;
@@ -257,14 +244,14 @@ static inline void play_dead(void)
 void enter_idle(void)
 {
        this_cpu_write(is_idle, 1);
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+       idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+       idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
index dadf42b454a383299231fa46abee1631aab53196..6acda145311ff7bd58a9d831c4568159de7dab8f 100644 (file)
@@ -1107,6 +1107,22 @@ static void disk_release(struct device *dev)
                blk_put_queue(disk->queue);
        kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       struct disk_part_iter piter;
+       struct hd_struct *part;
+       int cnt = 0;
+
+       disk_part_iter_init(&piter, disk, 0);
+       while ((part = disk_part_iter_next(&piter)))
+               cnt++;
+       disk_part_iter_exit(&piter);
+       add_uevent_var(env, "NPARTS=%u", cnt);
+       return 0;
+}
+
 struct class block_class = {
        .name           = "block",
 };
@@ -1126,6 +1142,7 @@ static struct device_type disk_type = {
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
+       .uevent         = disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
index 789cdea05893bb8e3420ede5d1aa65e7af408e1b..c7942acf1379d1b21f664e9b2ad82403432cc0c7 100644 (file)
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
        kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct hd_struct *part = dev_to_part(dev);
+
+       add_uevent_var(env, "PARTN=%u", part->partno);
+       if (part->info && part->info->volname[0])
+               add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+       return 0;
+}
+
 struct device_type part_type = {
        .name           = "partition",
        .groups         = part_attr_groups,
        .release        = part_release,
+       .uevent         = part_uevent,
 };
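
With these two uevent callbacks, a disk's uevents carry NPARTS and each partition's uevents carry PARTN plus, when the partition table supplies a volume name, PARTNAME. A hypothetical environment for the second of three partitions (names illustrative):

    ACTION=add
    DEVTYPE=partition
    PARTN=2
    PARTNAME=cache

and the matching disk event carries NPARTS=3, letting userspace (e.g. Android's ueventd) create by-name device links without parsing the partition table itself.
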
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
index d27feb5460f342522fdbd98fa2891c0001587587..ba3c789ad9b82ecf9e62061e0050616ffac3c5f9 100644 (file)
@@ -100,6 +100,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
index 092a62e79688da9b1a7466da63e52b9788f86ea0..1c92047b81bd7c756eb9eb8f3ecf772ede171002 100644 (file)
@@ -111,6 +111,7 @@ obj-$(CONFIG_CPU_IDLE)              += cpuidle/
 obj-y                          += mmc/
 obj-$(CONFIG_MEMSTICK)         += memstick/
 obj-y                          += leds/
+obj-$(CONFIG_SWITCH)           += switch/
 obj-$(CONFIG_INFINIBAND)       += infiniband/
 obj-$(CONFIG_SGI_SN)           += sn/
 obj-y                          += firmware/
index 220ec3a3ca750907bb26c3f2866e1c012b04db52..a42c3548bdd34bcfe4d3e47390c438333c7d35cc 100644 (file)
@@ -31,6 +31,8 @@
 #include <trace/events/power.h>
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -56,6 +58,12 @@ struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
+struct dpm_watchdog {
+       struct device           *dev;
+       struct task_struct      *tsk;
+       struct timer_list       timer;
+};
+
 static int async_error;
 
 /**
@@ -386,6 +394,56 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
        return error;
 }
 
+/**
+ * dpm_wd_handler - Driver suspend / resume watchdog handler.
+ * @data: Address of the struct dpm_watchdog for the stuck device, cast
+ *        to unsigned long.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so BUG() out for
+ * a crash dump.
+ */
+static void dpm_wd_handler(unsigned long data)
+{
+       struct dpm_watchdog *wd = (void *)data;
+       struct device *dev      = wd->dev;
+       struct task_struct *tsk = wd->tsk;
+
+       dev_emerg(dev, "**** DPM device timeout ****\n");
+       show_stack(tsk, NULL);
+
+       BUG();
+}
+
+/**
+ * dpm_wd_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
+{
+       struct timer_list *timer = &wd->timer;
+
+       wd->dev = dev;
+       wd->tsk = get_current();
+
+       init_timer_on_stack(timer);
+       timer->expires = jiffies + HZ * 12;
+       timer->function = dpm_wd_handler;
+       timer->data = (unsigned long)wd;
+       add_timer(timer);
+}
+
+/**
+ * dpm_wd_clear - Disable pm watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_wd_clear(struct dpm_watchdog *wd)
+{
+       struct timer_list *timer = &wd->timer;
+
+       del_timer_sync(timer);
+       destroy_timer_on_stack(timer);
+}
+
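
dpm_wd_set()/dpm_wd_clear() bracket each device's suspend and resume callbacks (see the hunks below): if a callback fails to return within the fixed 12 s budget (HZ * 12), the on-stack timer fires, dumps the stuck task's stack and BUG()s so the hang is captured in a crash dump instead of silently wedging the whole transition. The usage pattern, in outline:

    struct dpm_watchdog wd;         /* lives on the suspending task's stack */

    dpm_wd_set(&wd, dev);           /* arm: records dev + current task */
    /* ... run the device's suspend or resume callback ... */
    dpm_wd_clear(&wd);              /* disarm; synchronous, stack-safe */
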
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -572,6 +630,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       struct dpm_watchdog wd;
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
@@ -587,6 +646,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;
+       dpm_wd_set(&wd, dev);
 
        if (!dev->power.is_suspended)
                goto Unlock;
@@ -638,6 +698,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+       dpm_wd_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
@@ -1057,6 +1118,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       struct dpm_watchdog wd;
 
        dpm_wait_for_children(dev, async);
 
@@ -1079,6 +1141,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        if (dev->power.syscore)
                goto Complete;
+
+       dpm_wd_set(&wd, dev);
 
        device_lock(dev);
 
@@ -1135,6 +1199,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        device_unlock(dev);
 
+       dpm_wd_clear(&wd);
+
  Complete:
        complete_all(&dev->power.completion);
        if (error)
index 3bb6fa3930beba0c07282f858be05e2c74e10b79..6fcb9b01e57c10a56f30152b1a5d734444a4cc79 100644 (file)
@@ -6,6 +6,19 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+       bool "Memory device driver"
+       default y
+       help
+         The memory driver provides two character devices, mem and kmem, which
+         provide access to the system's memory. The mem device is a view of
+         physical memory, and each byte in the device corresponds to the
+         matching physical address. The kmem device is the same as mem, but
+         the addresses correspond to the kernel's virtual address space rather
+         than physical memory. These devices are standard parts of a Linux
+         system and most users should say Y here. You might say N if you
+         are very security conscious or if memory is tight.
+
 config DEVKMEM
        bool "/dev/kmem virtual device support"
        default y
@@ -584,6 +597,10 @@ config DEVPORT
        depends on ISA || PCI
        default y
 
+config DCC_TTY
+       tristate "DCC tty driver"
+       depends on ARM
+       help
+         TTY and console driver that transfers characters over the ARM
+         JTAG Debug Communications Channel (DCC), for use with an
+         attached JTAG debugger.
+
 source "drivers/s390/char/Kconfig"
 
 config MSM_SMD_PKT
index 7ff1d0d208a7c3f89f6827fd3e9ba8815dad202d..e0047ed1e74c3b20f8086d6856f82b16876ab442 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_PCMCIA)          += pcmcia/
 obj-$(CONFIG_HANGCHECK_TIMER)  += hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)          += tpm/
 
+obj-$(CONFIG_DCC_TTY)          += dcc_tty.o
 obj-$(CONFIG_PS3_FLASH)                += ps3flash.o
 
 obj-$(CONFIG_JS_RTC)           += js-rtc.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644 (file)
index 0000000..0a62d41
--- /dev/null
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+       char ch;
+       int rch;
+       int written;
+
+       while (g_dcc_buffer_count) {
+               ch = g_dcc_buffer[g_dcc_buffer_head];
+               asm(
+                       "mrc 14, 0, r15, c0, c1, 0\n"
+                       "mcrcc 14, 0, %1, c0, c5, 0\n"
+                       "movcc %0, #1\n"
+                       "movcs %0, #0\n"
+                       : "=r" (written)
+                       : "r" (ch)
+               );
+               if (written) {
+                       if (ch == '\n')
+                               g_dcc_buffer[g_dcc_buffer_head] = '\r';
+                       else {
+                               g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+                               g_dcc_buffer_count--;
+                               if (g_dcc_tty)
+                                       tty_wakeup(g_dcc_tty);
+                       }
+                       g_dcc_write_delay_usecs = 1;
+               } else {
+                       if (g_dcc_write_delay_usecs > 0x100)
+                               break;
+                       g_dcc_write_delay_usecs <<= 1;
+                       udelay(g_dcc_write_delay_usecs);
+               }
+       }
+
+       if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+               asm(
+                       "mrc 14, 0, %0, c0, c1, 0\n"
+                       "tst %0, #(1 << 30)\n"
+                       "moveq %0, #-1\n"
+                       "mrcne 14, 0, %0, c0, c5, 0\n"
+                       : "=r" (rch)
+               );
+               if (rch >= 0) {
+                       ch = rch;
+                       tty_insert_flip_string(g_dcc_tty->port, &ch, 1);
+                       tty_flip_buffer_push(g_dcc_tty->port);
+               }
+       }
+
+       if (g_dcc_buffer_count)
+               hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+       else
+               hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
+
+static int dcc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       int ret;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+               g_dcc_tty = tty;
+               g_dcc_tty_open_count++;
+               ret = 0;
+       } else
+               ret = -EBUSY;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+       printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+       return ret;
+}
+
+static void dcc_tty_close(struct tty_struct *tty, struct file *filp)
+{
+       printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+       if (g_dcc_tty == tty) {
+               if (--g_dcc_tty_open_count == 0)
+                       g_dcc_tty = NULL;
+       }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+       const unsigned char *buf = buf_start;
+       unsigned long irq_flags;
+       int copy_len;
+       int space_left;
+       int tail;
+
+       if (count < 1)
+               return 0;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       do {
+               tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+               copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+               if (copy_len > space_left)
+                       copy_len = space_left;
+               if (copy_len > count)
+                       copy_len = count;
+               memcpy(&g_dcc_buffer[tail], buf, copy_len);
+               g_dcc_buffer_count += copy_len;
+               buf += copy_len;
+               count -= copy_len;
+               if (copy_len < count && copy_len < space_left) {
+                       space_left -= copy_len;
+                       copy_len = count;
+                       if (copy_len > space_left) {
+                               copy_len = space_left;
+                       }
+                       memcpy(g_dcc_buffer, buf, copy_len);
+                       buf += copy_len;
+                       count -= copy_len;
+                       g_dcc_buffer_count += copy_len;
+               }
+               dcc_poll_locked();
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       } while (count && space_left);
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return buf - buf_start;
+}
+
+static int dcc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+       int ret;
+       /* printk("dcc_tty_write %p, %d\n", buf, count); */
+       ret = dcc_write(buf, count);
+       if (ret != count)
+               printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+       return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+       int space_left;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+       int ret;
+       asm(
+               "mrc 14, 0, %0, c0, c1, 0\n"
+               "mov %0, %0, LSR #30\n"
+               "and %0, %0, #1\n"
+               : "=r" (ret)
+       );
+       return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct *tty)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+       dcc_write(b, count);
+#else
+       /* blocking printk */
+       while (count > 0) {
+               int written;
+               written = dcc_write(b, count);
+               if (written) {
+                       b += written;
+                       count -= written;
+               }
+       }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+       *index = 0;
+       return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+       if (co->index != 0)
+               return -ENODEV;
+       return 0;
+}
+
+static struct console dcc_console = {
+       .name           = "ttyDCC",
+       .write          = dcc_console_write,
+       .device         = dcc_console_device,
+       .setup          = dcc_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+       .open = dcc_tty_open,
+       .close = dcc_tty_close,
+       .write = dcc_tty_write,
+       .write_room = dcc_tty_write_room,
+       .chars_in_buffer = dcc_tty_chars_in_buffer,
+       .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+       int ret;
+
+       hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       g_dcc_timer.function = dcc_tty_timer_func;
+
+       g_dcc_tty_driver = alloc_tty_driver(1);
+       if (!g_dcc_tty_driver) {
+               printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+               ret = -ENOMEM;
+               goto err_alloc_tty_driver_failed;
+       }
+       g_dcc_tty_driver->owner = THIS_MODULE;
+       g_dcc_tty_driver->driver_name = "dcc";
+       g_dcc_tty_driver->name = "ttyDCC";
+       g_dcc_tty_driver->major = 0; /* auto assign */
+       g_dcc_tty_driver->minor_start = 0;
+       g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+       g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+       g_dcc_tty_driver->init_termios = tty_std_termios;
+       g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+       tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+       ret = tty_register_driver(g_dcc_tty_driver);
+       if (ret) {
+               printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+               goto err_tty_register_driver_failed;
+       }
+       tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+       register_console(&dcc_console);
+       hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+       return 0;
+
+err_tty_register_driver_failed:
+       put_tty_driver(g_dcc_tty_driver);
+       g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+       return ret;
+}
+
+static void  __exit dcc_tty_exit(void)
+{
+       int ret;
+
+       tty_unregister_device(g_dcc_tty_driver, 0);
+       ret = tty_unregister_driver(g_dcc_tty_driver);
+       if (ret < 0) {
+               printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+       } else {
+               put_tty_driver(g_dcc_tty_driver);
+       }
+       g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
index 1ccbe9482faa5dbc3930478a17f493781849f161..38d3069b7f0a45150da1d02615a36b022a3754f9 100644 (file)
@@ -60,6 +60,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 }
 #endif
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 #ifdef CONFIG_STRICT_DEVMEM
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -85,7 +86,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #endif
+#endif
 
+#ifdef CONFIG_DEVMEM
 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
@@ -212,6 +215,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
        *ppos += written;
        return written;
 }
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -333,6 +339,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        }
        return 0;
 }
+#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
 
 #ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -727,6 +734,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos = 0;
 }
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
 /*
  * The memory devices use the full 32/64 bits of the offset, and so we cannot
  * check against negative addresses: they are ok. The return value is weird,
@@ -760,10 +769,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
        return ret;
 }
 
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 static int open_port(struct inode *inode, struct file *filp)
 {
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
+#endif
 
 #define zero_lseek     null_lseek
 #define full_lseek      null_lseek
@@ -774,6 +787,7 @@ static int open_port(struct inode *inode, struct file *filp)
 #define open_kmem      open_mem
 #define open_oldmem    open_mem
 
+#ifdef CONFIG_DEVMEM
 static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
@@ -782,6 +796,7 @@ static const struct file_operations mem_fops = {
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 #ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
@@ -851,7 +866,9 @@ static const struct memdev {
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
 } devlist[] = {
+#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
 #ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
 #endif
index a9c1324843ebe4148bfc31421641a658e83faef7..9e1f7d9b52b5a26ca27551fd2449d44fdb898427 100644 (file)
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+       bool "interactive"
+       select CPU_FREQ_GOV_INTERACTIVE
+       help
+         Use the CPUFreq governor 'interactive' as default. This gives
+         you a fully dynamic cpu frequency capable system: load your
+         cpufreq low-level hardware driver and the 'interactive'
+         governor will be used for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -160,6 +170,24 @@ config CPU_FREQ_GOV_ONDEMAND
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+       tristate "'interactive' cpufreq policy governor"
+       default n
+       help
+         'interactive' - This driver adds a dynamic cpufreq policy governor
+         designed for latency-sensitive workloads.
+
+         This governor attempts to reduce the latency of clock
+         increases so that the system is more responsive to
+         interactive workloads.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cpufreq_interactive.
+
+         For details, take a look at linux/Documentation/cpu-freq.
+
+         If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
        tristate "'conservative' cpufreq governor"
        depends on CPU_FREQ
index 505c62bceb9d190b4715349c0c5038eace378cb3..a8f252b32b8c6a9bc74731e4fcd74b2ab596a25e 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)    += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
 # CPUfreq cross-arch helpers
index ce94c323fa13dc88f147abd935fba80497b426d6..aea1999b56ae4391fd5240f684ed5fcdc677b0d2 100644 (file)
@@ -17,7 +17,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/cputime.h>
 #include <linux/kernel.h>
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/notifier.h>
@@ -25,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/tick.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
@@ -143,6 +146,51 @@ bool have_governor_per_policy(void)
 {
        return cpufreq_driver->have_governor_per_policy;
 }
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+       if (have_governor_per_policy())
+               return &policy->kobj;
+       else
+               return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+       u64 idle_time;
+       u64 cur_wall_time;
+       u64 busy_time;
+
+       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+       busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+       idle_time = cur_wall_time - busy_time;
+       if (wall)
+               *wall = cputime_to_usecs(cur_wall_time);
+
+       return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+       u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+       if (idle_time == -1ULL)
+               return get_cpu_idle_time_jiffy(cpu, wall);
+       else if (!io_busy)
+               idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+       return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
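
get_cpu_idle_time() is now exported from the core so governors built outside cpufreq_governor.c (notably the new interactive governor) can share it. A governor measures load by sampling it twice; a minimal fragment of that pattern (cpu and io_busy as in the callers, interval hypothetical):

    u64 idle0, wall0, idle1, wall1;
    unsigned int load;

    idle0 = get_cpu_idle_time(cpu, &wall0, io_busy);
    /* ... wait one sampling interval ... */
    idle1 = get_cpu_idle_time(cpu, &wall1, io_busy);

    /* busy% over the interval; guard against a zero-length window */
    if (wall1 > wall0)
            load = (unsigned int)(100 * ((wall1 - wall0) - (idle1 - idle0))
                                  / (wall1 - wall0));
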
 
 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 {
index 28a0b32c73b3c4747cff6d63b22ded9283623d93..f007924197dfe8ee43557a1d4034b056672881d7 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/tick.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
 #include "cpufreq_governor.h"
 
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
-       if (have_governor_per_policy())
-               return &policy->kobj;
-       else
-               return cpufreq_global_kobject;
-}
-
 static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 {
        if (have_governor_per_policy())
@@ -45,41 +36,6 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
                return dbs_data->cdata->attr_group_gov_sys;
 }
 
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
-       u64 idle_time;
-       u64 cur_wall_time;
-       u64 busy_time;
-
-       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
-       busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
-       idle_time = cur_wall_time - busy_time;
-       if (wall)
-               *wall = cputime_to_usecs(cur_wall_time);
-
-       return cputime_to_usecs(idle_time);
-}
-
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
-{
-       u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
-
-       if (idle_time == -1ULL)
-               return get_cpu_idle_time_jiffy(cpu, wall);
-       else if (!io_busy)
-               idle_time += get_cpu_iowait_time_us(cpu, wall);
-
-       return idle_time;
-}
-EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
@@ -404,6 +360,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                mutex_lock(&dbs_data->mutex);
                mutex_destroy(&cpu_cdbs->timer_mutex);
+               cpu_cdbs->cur_policy = NULL;
 
                mutex_unlock(&dbs_data->mutex);
 
index 0d9e6befe1d54a7bc88e1bc2d785b46bb04b4fd6..c501ca83d7599006067ba39cb7ca0ef02e23ca92 100644 (file)
@@ -256,7 +256,6 @@ static ssize_t show_sampling_rate_min_gov_pol                               \
        return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);       \
 }
 
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
 bool need_load_eval(struct cpu_dbs_common_info *cdbs,
                unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644 (file)
index 0000000..4d2557d
--- /dev/null
@@ -0,0 +1,1370 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include "cpufreq_governor.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+       struct timer_list cpu_timer;
+       struct timer_list cpu_slack_timer;
+       spinlock_t load_lock; /* protects the next 4 fields */
+       u64 time_in_idle;
+       u64 time_in_idle_timestamp;
+       u64 cputime_speedadj;
+       u64 cputime_speedadj_timestamp;
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *freq_table;
+       spinlock_t target_freq_lock; /* protects target freq */
+       unsigned int target_freq;
+       unsigned int floor_freq;
+       unsigned int max_freq;
+       u64 floor_validate_time;
+       u64 hispeed_validate_time;
+       struct rw_semaphore enable_sem;
+       int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+       DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+       int usage_count;
+       /* Hi speed to bump to from lo speed on a load burst (default max) */
+       unsigned int hispeed_freq;
+       /* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+       unsigned long go_hispeed_load;
+       /* Target load. Lower values result in higher CPU speeds. */
+       spinlock_t target_loads_lock;
+       unsigned int *target_loads;
+       int ntarget_loads;
+       /*
+        * The minimum amount of time to spend at a frequency before we can ramp
+        * down.
+        */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+       unsigned long min_sample_time;
+       /*
+        * The sample rate of the timer used to increase frequency
+        */
+       unsigned long timer_rate;
+       /*
+        * Wait this long before raising speed above hispeed, by default a
+        * single timer interval.
+        */
+       spinlock_t above_hispeed_delay_lock;
+       unsigned int *above_hispeed_delay;
+       int nabove_hispeed_delay;
+       /* Non-zero means indefinite speed boost active */
+       int boost_val;
+       /* Duration of a boost pulse in usecs */
+       int boostpulse_duration_val;
+       /* End time of boost pulse in ktime converted to usecs */
+       u64 boostpulse_endtime;
+       /*
+        * Max additional time to wait in idle, beyond timer_rate, at speeds
+        * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+        */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+       int timer_slack_val;
+       bool io_is_busy;
+};
+
+/* For cases where we have a single governor instance for the system */
+struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static void cpufreq_interactive_timer_resched(
+       struct cpufreq_interactive_cpuinfo *pcpu)
+{
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned long expires;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(smp_processor_id(),
+                                 &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+       mod_timer_pinned(&pcpu->cpu_timer, expires);
+
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+       }
+
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/*
+ * The caller shall take the enable_sem write semaphore to avoid any timer
+ * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(
+       struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       unsigned long expires = jiffies +
+               usecs_to_jiffies(tunables->timer_rate);
+       unsigned long flags;
+
+       pcpu->cpu_timer.expires = expires;
+       add_timer_on(&pcpu->cpu_timer, cpu);
+       if (tunables->timer_slack_val >= 0 &&
+           pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(tunables->timer_slack_val);
+               pcpu->cpu_slack_timer.expires = expires;
+               add_timer_on(&pcpu->cpu_slack_timer, cpu);
+       }
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+                                 tunables->io_is_busy);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+                       freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+               ;
+
+       ret = tunables->above_hispeed_delay[i];
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static unsigned int freq_to_targetload(
+       struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads - 1 &&
+                   freq >= tunables->target_loads[i+1]; i += 2)
+               ;
+
+       ret = tunables->target_loads[i];
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
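
freq_to_above_hispeed_delay() and freq_to_targetload() share one table encoding: a flat array {value, freq, value, freq, ..., value}, where each value applies up to (but not including) the frequency that follows it and the final value applies above the last boundary; the scan steps i by 2 and stops before a boundary the query frequency has not reached. A runnable sketch of the same lookup (table values hypothetical, frequencies in kHz):

    #include <stdio.h>

    /* {90, 1000000, 95, 1500000, 99}: target load 90 below 1.0 GHz,
     * 95 from 1.0 GHz up to 1.5 GHz, 99 at and above 1.5 GHz. */
    static unsigned int target_loads[] = { 90, 1000000, 95, 1500000, 99 };
    static int ntarget_loads = 5;

    static unsigned int freq_to_targetload(unsigned int freq)
    {
            int i;

            for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i + 1];
                 i += 2)
                    ;
            return target_loads[i];
    }

    int main(void)
    {
            printf("%u %u %u\n", freq_to_targetload(800000),
                   freq_to_targetload(1200000), freq_to_targetload(1700000));
            /* prints: 90 95 99 */
            return 0;
    }
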
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+               unsigned int loadadjfreq)
+{
+       unsigned int freq = pcpu->policy->cur;
+       unsigned int prevfreq, freqmin, freqmax;
+       unsigned int tl;
+       int index;
+
+       freqmin = 0;
+       freqmax = UINT_MAX;
+
+       do {
+               prevfreq = freq;
+               tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+               /*
+                * Find the lowest frequency where the computed load is less
+                * than or equal to the target load.
+                */
+
+               if (cpufreq_frequency_table_target(
+                           pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+                           CPUFREQ_RELATION_L, &index))
+                       break;
+               freq = pcpu->freq_table[index].frequency;
+
+               if (freq > prevfreq) {
+                       /* The previous frequency is too low. */
+                       freqmin = prevfreq;
+
+                       if (freq >= freqmax) {
+                               /*
+                                * Find the highest frequency that is less
+                                * than freqmax.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmax - 1, CPUFREQ_RELATION_H,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               if (freq == freqmin) {
+                                       /*
+                                        * The first frequency below freqmax
+                                        * has already been found to be too
+                                        * low.  freqmax is the lowest speed
+                                        * we found that is fast enough.
+                                        */
+                                       freq = freqmax;
+                                       break;
+                               }
+                       }
+               } else if (freq < prevfreq) {
+                       /* The previous frequency is high enough. */
+                       freqmax = prevfreq;
+
+                       if (freq <= freqmin) {
+                               /*
+                                * Find the lowest frequency that is higher
+                                * than freqmin.
+                                */
+                               if (cpufreq_frequency_table_target(
+                                           pcpu->policy, pcpu->freq_table,
+                                           freqmin + 1, CPUFREQ_RELATION_L,
+                                           &index))
+                                       break;
+                               freq = pcpu->freq_table[index].frequency;
+
+                               /*
+                                * If freqmax is the first frequency above
+                                * freqmin then we have already found that
+                                * this speed is fast enough.
+                                */
+                               if (freq == freqmax)
+                                       break;
+                       }
+               }
+
+               /* If the same frequency was chosen as last time, we're done. */
+       } while (freq != prevfreq);
+
+       return freq;
+}
+
+static u64 update_load(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       u64 now;
+       u64 now_idle;
+       u64 delta_idle;
+       u64 delta_time;
+       u64 active_time;
+
+       now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+       delta_idle = (now_idle - pcpu->time_in_idle);
+       delta_time = (now - pcpu->time_in_idle_timestamp);
+
+       if (delta_time <= delta_idle)
+               active_time = 0;
+       else
+               active_time = delta_time - delta_idle;
+
+       pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+       pcpu->time_in_idle = now_idle;
+       pcpu->time_in_idle_timestamp = now;
+       return now;
+}
+
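+/*
+ * Illustrative arithmetic for the sampling below (hypothetical numbers):
+ * a CPU active for 45 ms of a 100 ms window at 1000000 kHz accumulates
+ * cputime_speedadj = 45000 * 1000000.  Dividing by delta_time (100000 us)
+ * and scaling by 100 gives loadadjfreq = 45000000, so cpu_load =
+ * 45000000 / target_freq, i.e. 45% if target_freq is 1000000 kHz.
+ */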
+static void cpufreq_interactive_timer(unsigned long data)
+{
+       u64 now;
+       unsigned int delta_time;
+       u64 cputime_speedadj;
+       int cpu_load;
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, data);
+       struct cpufreq_interactive_tunables *tunables =
+               pcpu->policy->governor_data;
+       unsigned int new_freq;
+       unsigned int loadadjfreq;
+       unsigned int index;
+       unsigned long flags;
+       bool boosted;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled)
+               goto exit;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       now = update_load(data);
+       delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+       cputime_speedadj = pcpu->cputime_speedadj;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+       if (WARN_ON_ONCE(!delta_time))
+               goto rearm;
+
+       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+       do_div(cputime_speedadj, delta_time);
+       loadadjfreq = (unsigned int)cputime_speedadj * 100;
+       cpu_load = loadadjfreq / pcpu->target_freq;
+       boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+       if (cpu_load >= tunables->go_hispeed_load || boosted) {
+               if (pcpu->target_freq < tunables->hispeed_freq) {
+                       new_freq = tunables->hispeed_freq;
+               } else {
+                       new_freq = choose_freq(pcpu, loadadjfreq);
+
+                       if (new_freq < tunables->hispeed_freq)
+                               new_freq = tunables->hispeed_freq;
+               }
+       } else {
+               new_freq = choose_freq(pcpu, loadadjfreq);
+               if (new_freq > tunables->hispeed_freq &&
+                               pcpu->target_freq < tunables->hispeed_freq)
+                       new_freq = tunables->hispeed_freq;
+       }
+
+       if (pcpu->target_freq >= tunables->hispeed_freq &&
+           new_freq > pcpu->target_freq &&
+           now - pcpu->hispeed_validate_time <
+           freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+               trace_cpufreq_interactive_notyet(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm;
+       }
+
+       pcpu->hispeed_validate_time = now;
+
+       if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+                                          new_freq, CPUFREQ_RELATION_L,
+                                          &index)) {
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm;
+       }
+
+       new_freq = pcpu->freq_table[index].frequency;
+
+       /*
+        * Do not scale below floor_freq unless we have been at or above the
+        * floor frequency for the minimum sample time since last validated.
+        */
+       if (new_freq < pcpu->floor_freq) {
+               if (now - pcpu->floor_validate_time <
+                               tunables->min_sample_time) {
+                       trace_cpufreq_interactive_notyet(
+                               data, cpu_load, pcpu->target_freq,
+                               pcpu->policy->cur, new_freq);
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       goto rearm;
+               }
+       }
+
+       /*
+        * Update the timestamp for checking whether speed has been held at
+        * or above the selected frequency for a minimum of min_sample_time,
+        * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+        * allow the speed to drop as soon as the boostpulse duration expires
+        * (or the indefinite boost is turned off).
+        */
+
+       if (!boosted || new_freq > tunables->hispeed_freq) {
+               pcpu->floor_freq = new_freq;
+               pcpu->floor_validate_time = now;
+       }
+
+       if (pcpu->target_freq == new_freq) {
+               trace_cpufreq_interactive_already(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+               goto rearm_if_notmax;
+       }
+
+       trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+                                        pcpu->policy->cur, new_freq);
+
+       pcpu->target_freq = new_freq;
+       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       cpumask_set_cpu(data, &speedchange_cpumask);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+	/*
+	 * Already at max speed with no need to change it; wait until the
+	 * next idle to re-evaluate, no timer needed.
+	 */
+       if (pcpu->target_freq == pcpu->policy->max)
+               goto exit;
+
+rearm:
+       if (!timer_pending(&pcpu->cpu_timer))
+               cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+       up_read(&pcpu->enable_sem);
+       return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+       int pending;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       pending = timer_pending(&pcpu->cpu_timer);
+
+       if (pcpu->target_freq != pcpu->policy->min) {
+               /*
+                * Entering idle while not at lowest speed.  On some
+                * platforms this can hold the other CPU(s) at that speed
+                * even though the CPU is idle. Set a timer to re-evaluate
+                * speed so this idle CPU doesn't hold the other CPUs above
+                * min indefinitely.  This should probably be a quirk of
+                * the CPUFreq driver.
+                */
+               if (!pending)
+                       cpufreq_interactive_timer_resched(pcpu);
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+	/* Arm the timer for 1-2 ticks later if not already pending. */
+       if (!timer_pending(&pcpu->cpu_timer)) {
+               cpufreq_interactive_timer_resched(pcpu);
+       } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+               del_timer(&pcpu->cpu_timer);
+               del_timer(&pcpu->cpu_slack_timer);
+               cpufreq_interactive_timer(smp_processor_id());
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+               if (cpumask_empty(&speedchange_cpumask)) {
+                       spin_unlock_irqrestore(&speedchange_cpumask_lock,
+                                              flags);
+                       schedule();
+
+                       if (kthread_should_stop())
+                               break;
+
+                       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+               }
+
+               set_current_state(TASK_RUNNING);
+               tmp_mask = speedchange_cpumask;
+               cpumask_clear(&speedchange_cpumask);
+               spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+               for_each_cpu(cpu, &tmp_mask) {
+                       unsigned int j;
+                       unsigned int max_freq = 0;
+
+                       pcpu = &per_cpu(cpuinfo, cpu);
+                       if (!down_read_trylock(&pcpu->enable_sem))
+                               continue;
+                       if (!pcpu->governor_enabled) {
+                               up_read(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       for_each_cpu(j, pcpu->policy->cpus) {
+                               struct cpufreq_interactive_cpuinfo *pjcpu =
+                                       &per_cpu(cpuinfo, j);
+
+                               if (pjcpu->target_freq > max_freq)
+                                       max_freq = pjcpu->target_freq;
+                       }
+
+                       if (max_freq != pcpu->policy->cur)
+                               __cpufreq_driver_target(pcpu->policy,
+                                                       max_freq,
+                                                       CPUFREQ_RELATION_H);
+                       trace_cpufreq_interactive_setspeed(cpu,
+                                                    pcpu->target_freq,
+                                                    pcpu->policy->cur);
+
+                       up_read(&pcpu->enable_sem);
+               }
+       }
+
+       return 0;
+}
+
+static void cpufreq_interactive_boost(void)
+{
+       int i;
+       int anyboost = 0;
+       unsigned long flags[2];
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_interactive_tunables *tunables;
+
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               tunables = pcpu->policy->governor_data;
+
+               spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+               if (pcpu->target_freq < tunables->hispeed_freq) {
+                       pcpu->target_freq = tunables->hispeed_freq;
+                       cpumask_set_cpu(i, &speedchange_cpumask);
+                       pcpu->hispeed_validate_time =
+                               ktime_to_us(ktime_get());
+                       anyboost = 1;
+               }
+
+               /*
+                * Set floor freq and (re)start timer for when last
+                * validated.
+                */
+
+               pcpu->floor_freq = tunables->hispeed_freq;
+               pcpu->floor_validate_time = ktime_to_us(ktime_get());
+               spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+       }
+
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+       if (anyboost)
+               wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+       struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       int cpu;
+       unsigned long flags;
+
+       if (val == CPUFREQ_POSTCHANGE) {
+               pcpu = &per_cpu(cpuinfo, freq->cpu);
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       return 0;
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       return 0;
+               }
+
+               for_each_cpu(cpu, pcpu->policy->cpus) {
+                       struct cpufreq_interactive_cpuinfo *pjcpu =
+                               &per_cpu(cpuinfo, cpu);
+                       if (cpu != freq->cpu) {
+                               if (!down_read_trylock(&pjcpu->enable_sem))
+                                       continue;
+                               if (!pjcpu->governor_enabled) {
+                                       up_read(&pjcpu->enable_sem);
+                                       continue;
+                               }
+                       }
+                       spin_lock_irqsave(&pjcpu->load_lock, flags);
+                       update_load(cpu);
+                       spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+                       if (cpu != freq->cpu)
+                               up_read(&pjcpu->enable_sem);
+               }
+
+               up_read(&pcpu->enable_sem);
+       }
+       return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+       .notifier_call = cpufreq_interactive_notifier,
+};
+
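+/*
+ * Parse a list of unsigned ints separated by spaces or colons, e.g.
+ * (illustrative input) "85 1500000:90" -> { 85, 1500000, 90 }.  An even
+ * number of tokens cannot form a "value freq:value ..." sequence, so it
+ * is rejected with -EINVAL.
+ */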
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+       const char *cp;
+       int i;
+       int ntokens = 1;
+       unsigned int *tokenized_data;
+       int err = -EINVAL;
+
+       cp = buf;
+       while ((cp = strpbrk(cp + 1, " :")))
+               ntokens++;
+
+       if (!(ntokens & 0x1))
+               goto err;
+
+       tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+       if (!tokenized_data) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       cp = buf;
+       i = 0;
+       while (i < ntokens) {
+               if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+                       goto err_kfree;
+
+               cp = strpbrk(cp, " :");
+               if (!cp)
+                       break;
+               cp++;
+       }
+
+       if (i != ntokens)
+               goto err_kfree;
+
+       *num_tokens = ntokens;
+       return tokenized_data;
+
+err_kfree:
+       kfree(tokenized_data);
+err:
+       return ERR_PTR(err);
+}
+
+static ssize_t show_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+       for (i = 0; i < tunables->ntarget_loads; i++)
+               ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return ret;
+}
+
+static ssize_t store_target_loads(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_target_loads = NULL;
+       unsigned long flags;
+
+       new_target_loads = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_target_loads))
+               return PTR_RET(new_target_loads);
+
+       spin_lock_irqsave(&tunables->target_loads_lock, flags);
+       if (tunables->target_loads != default_target_loads)
+               kfree(tunables->target_loads);
+       tunables->target_loads = new_target_loads;
+       tunables->ntarget_loads = ntokens;
+       spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+       return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+       for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+               ret += sprintf(buf + ret, "%u%s",
+                              tunables->above_hispeed_delay[i],
+                              i & 0x1 ? ":" : " ");
+
+       sprintf(buf + ret - 1, "\n");
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+       return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+       struct cpufreq_interactive_tunables *tunables,
+       const char *buf, size_t count)
+{
+       int ntokens;
+       unsigned int *new_above_hispeed_delay = NULL;
+       unsigned long flags;
+
+       new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+       if (IS_ERR(new_above_hispeed_delay))
+               return PTR_RET(new_above_hispeed_delay);
+
+       spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+       if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+               kfree(tunables->above_hispeed_delay);
+       tunables->above_hispeed_delay = new_above_hispeed_delay;
+       tunables->nabove_hispeed_delay = ntokens;
+       spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return count;
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+	unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->hispeed_freq = val;
+       return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->go_hispeed_load = val;
+       return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->min_sample_time = val;
+       return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->timer_rate = val;
+       return count;
+}
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+	int ret;
+	long val;
+
+	ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->timer_slack_val = val;
+       return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+                         char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+                          const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boost_val = val;
+
+       if (tunables->boost_val) {
+               trace_cpufreq_interactive_boost("on");
+               cpufreq_interactive_boost();
+       } else {
+               tunables->boostpulse_endtime = ktime_to_us(ktime_get());
+               trace_cpufreq_interactive_unboost("off");
+       }
+
+       return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+                               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+               tunables->boostpulse_duration_val;
+       trace_cpufreq_interactive_boost("pulse");
+       cpufreq_interactive_boost();
+       return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, char *buf)
+{
+       return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+               *tunables, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       tunables->boostpulse_duration_val = val;
+       return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               char *buf)
+{
+       return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+               const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = kstrtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       tunables->io_is_busy = val;
+       return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
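+/*
+ * Expansion sketch: show_gov_pol_sys(timer_rate) emits
+ * show_timer_rate_gov_sys(), which reads the system-wide common_tunables,
+ * and show_timer_rate_gov_pol(), which reads the tunables hanging off
+ * policy->governor_data.
+ */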
+#define show_gov_pol_sys(file_name)                                    \
+static ssize_t show_##file_name##_gov_sys                              \
+(struct kobject *kobj, struct attribute *attr, char *buf)              \
+{                                                                      \
+       return show_##file_name(common_tunables, buf);                  \
+}                                                                      \
+                                                                       \
+static ssize_t show_##file_name##_gov_pol                              \
+(struct cpufreq_policy *policy, char *buf)                             \
+{                                                                      \
+       return show_##file_name(policy->governor_data, buf);            \
+}
+
+#define store_gov_pol_sys(file_name)                                   \
+static ssize_t store_##file_name##_gov_sys                             \
+(struct kobject *kobj, struct attribute *attr, const char *buf,                \
+       size_t count)                                                   \
+{                                                                      \
+       return store_##file_name(common_tunables, buf, count);          \
+}                                                                      \
+                                                                       \
+static ssize_t store_##file_name##_gov_pol                             \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
+{                                                                      \
+       return store_##file_name(policy->governor_data, buf, count);    \
+}
+
+#define show_store_gov_pol_sys(file_name)                              \
+show_gov_pol_sys(file_name);                                           \
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+
+#define gov_sys_attr_rw(_name)                                         \
+static struct global_attr _name##_gov_sys =                            \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)                                         \
+static struct freq_attr _name##_gov_pol =                              \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)                                     \
+       gov_sys_attr_rw(_name);                                         \
+       gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+       __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+       &target_loads_gov_sys.attr,
+       &above_hispeed_delay_gov_sys.attr,
+       &hispeed_freq_gov_sys.attr,
+       &go_hispeed_load_gov_sys.attr,
+       &min_sample_time_gov_sys.attr,
+       &timer_rate_gov_sys.attr,
+       &timer_slack_gov_sys.attr,
+       &boost_gov_sys.attr,
+       &boostpulse_gov_sys.attr,
+       &boostpulse_duration_gov_sys.attr,
+       &io_is_busy_gov_sys.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+       .attrs = interactive_attributes_gov_sys,
+       .name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+       &target_loads_gov_pol.attr,
+       &above_hispeed_delay_gov_pol.attr,
+       &hispeed_freq_gov_pol.attr,
+       &go_hispeed_load_gov_pol.attr,
+       &min_sample_time_gov_pol.attr,
+       &timer_rate_gov_pol.attr,
+       &timer_slack_gov_pol.attr,
+       &boost_gov_pol.attr,
+       &boostpulse_gov_pol.attr,
+       &boostpulse_duration_gov_pol.attr,
+       &io_is_busy_gov_pol.attr,
+       NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+       .attrs = interactive_attributes_gov_pol,
+       .name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+       if (have_governor_per_policy())
+               return &interactive_attr_group_gov_pol;
+       else
+               return &interactive_attr_group_gov_sys;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+                                            unsigned long val,
+                                            void *data)
+{
+       switch (val) {
+       case IDLE_START:
+               cpufreq_interactive_idle_start();
+               break;
+       case IDLE_END:
+               cpufreq_interactive_idle_end();
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+       .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       int rc;
+       unsigned int j;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct cpufreq_frequency_table *freq_table;
+       struct cpufreq_interactive_tunables *tunables;
+       unsigned long flags;
+
+       if (have_governor_per_policy())
+               tunables = policy->governor_data;
+       else
+               tunables = common_tunables;
+
+       WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
+
+       switch (event) {
+       case CPUFREQ_GOV_POLICY_INIT:
+               if (have_governor_per_policy()) {
+                       WARN_ON(tunables);
+               } else if (tunables) {
+                       tunables->usage_count++;
+                       policy->governor_data = tunables;
+                       return 0;
+               }
+
+               tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+               if (!tunables) {
+                       pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+                       return -ENOMEM;
+               }
+
+               tunables->usage_count = 1;
+               tunables->above_hispeed_delay = default_above_hispeed_delay;
+               tunables->nabove_hispeed_delay =
+                       ARRAY_SIZE(default_above_hispeed_delay);
+               tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+               tunables->target_loads = default_target_loads;
+               tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+               tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_rate = DEFAULT_TIMER_RATE;
+               tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+               tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+               spin_lock_init(&tunables->target_loads_lock);
+               spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+               policy->governor_data = tunables;
+               if (!have_governor_per_policy())
+                       common_tunables = tunables;
+
+               rc = sysfs_create_group(get_governor_parent_kobj(policy),
+                               get_sysfs_attr());
+               if (rc) {
+                       kfree(tunables);
+                       policy->governor_data = NULL;
+                       if (!have_governor_per_policy())
+                               common_tunables = NULL;
+                       return rc;
+               }
+
+               if (!policy->governor->initialized) {
+                       idle_notifier_register(&cpufreq_interactive_idle_nb);
+                       cpufreq_register_notifier(&cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+               }
+
+               break;
+
+       case CPUFREQ_GOV_POLICY_EXIT:
+               if (!--tunables->usage_count) {
+                       if (policy->governor->initialized == 1) {
+                               cpufreq_unregister_notifier(&cpufreq_notifier_block,
+                                               CPUFREQ_TRANSITION_NOTIFIER);
+                               idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+                       }
+
+                       sysfs_remove_group(get_governor_parent_kobj(policy),
+                                       get_sysfs_attr());
+                       kfree(tunables);
+                       common_tunables = NULL;
+               }
+
+               policy->governor_data = NULL;
+               break;
+
+       case CPUFREQ_GOV_START:
+               mutex_lock(&gov_lock);
+
+               freq_table = cpufreq_frequency_get_table(policy->cpu);
+               if (!tunables->hispeed_freq)
+                       tunables->hispeed_freq = policy->max;
+
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       pcpu->policy = policy;
+                       pcpu->target_freq = policy->cur;
+                       pcpu->freq_table = freq_table;
+                       pcpu->floor_freq = pcpu->target_freq;
+                       pcpu->floor_validate_time =
+                               ktime_to_us(ktime_get());
+                       pcpu->hispeed_validate_time =
+                               pcpu->floor_validate_time;
+                       pcpu->max_freq = policy->max;
+                       down_write(&pcpu->enable_sem);
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       cpufreq_interactive_timer_start(tunables, j);
+                       pcpu->governor_enabled = 1;
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_STOP:
+               mutex_lock(&gov_lock);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+                       down_write(&pcpu->enable_sem);
+                       pcpu->governor_enabled = 0;
+                       del_timer_sync(&pcpu->cpu_timer);
+                       del_timer_sync(&pcpu->cpu_slack_timer);
+                       up_write(&pcpu->enable_sem);
+               }
+
+               mutex_unlock(&gov_lock);
+               break;
+
+       case CPUFREQ_GOV_LIMITS:
+               if (policy->max < policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->max, CPUFREQ_RELATION_H);
+               else if (policy->min > policy->cur)
+                       __cpufreq_driver_target(policy,
+                                       policy->min, CPUFREQ_RELATION_L);
+               for_each_cpu(j, policy->cpus) {
+                       pcpu = &per_cpu(cpuinfo, j);
+
+                       down_read(&pcpu->enable_sem);
+                       if (pcpu->governor_enabled == 0) {
+                               up_read(&pcpu->enable_sem);
+                               continue;
+                       }
+
+                       spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+                       if (policy->max < pcpu->target_freq)
+                               pcpu->target_freq = policy->max;
+                       else if (policy->min > pcpu->target_freq)
+                               pcpu->target_freq = policy->min;
+
+                       spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+                       up_read(&pcpu->enable_sem);
+
+			/*
+			 * Reschedule the timer only if policy->max is
+			 * raised.  Delete the timers first; otherwise the
+			 * timer callback may return without re-arming the
+			 * timer when it fails to acquire the semaphore,
+			 * and that race can leave the timer stopped
+			 * unexpectedly.
+			 */
+
+                       if (policy->max > pcpu->max_freq) {
+                               down_write(&pcpu->enable_sem);
+                               del_timer_sync(&pcpu->cpu_timer);
+                               del_timer_sync(&pcpu->cpu_slack_timer);
+                               cpufreq_interactive_timer_start(tunables, j);
+                               up_write(&pcpu->enable_sem);
+                       }
+
+                       pcpu->max_freq = policy->max;
+               }
+               break;
+       }
+       return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+       .name = "interactive",
+       .governor = cpufreq_governor_interactive,
+       .max_transition_latency = 10000000,
+       .owner = THIS_MODULE,
+};
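+/*
+ * The governor is selected through the standard cpufreq sysfs interface,
+ * e.g.:
+ *
+ *	echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ *
+ * after which the tunables above appear under cpufreq/interactive/
+ * (system-wide) or under each policy directory when
+ * have_governor_per_policy() is true.
+ */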
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+       unsigned int i;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+	/* Initialize per-cpu timers */
+       for_each_possible_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+               init_timer_deferrable(&pcpu->cpu_timer);
+               pcpu->cpu_timer.function = cpufreq_interactive_timer;
+               pcpu->cpu_timer.data = i;
+               init_timer(&pcpu->cpu_slack_timer);
+               pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+               spin_lock_init(&pcpu->load_lock);
+               spin_lock_init(&pcpu->target_freq_lock);
+               init_rwsem(&pcpu->enable_sem);
+       }
+
+       spin_lock_init(&speedchange_cpumask_lock);
+       mutex_init(&gov_lock);
+       speedchange_task =
+               kthread_create(cpufreq_interactive_speedchange_task, NULL,
+                              "cfinteractive");
+       if (IS_ERR(speedchange_task))
+               return PTR_ERR(speedchange_task);
+
+       sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+       get_task_struct(speedchange_task);
+
+       /* NB: wake up so the thread does not look hung to the freezer */
+       wake_up_process(speedchange_task);
+
+       return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+       cpufreq_unregister_governor(&cpufreq_gov_interactive);
+       kthread_stop(speedchange_task);
+       put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+       "Latency sensitive workloads");
+MODULE_LICENSE("GPL");
index 66733f1d55d48be8d335d2ee2ac078cdee68df5d..0aba4d750559c12bf91d58bf2740d7553528521d 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/kobject.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/sort.h>
+#include <linux/err.h>
 #include <asm/cputime.h>
 #ifdef CONFIG_BL_SWITCHER
 #include <asm/bL_switcher.h>
@@ -41,6 +43,20 @@ struct cpufreq_stats {
 #endif
 };
 
+struct all_cpufreq_stats {
+       unsigned int state_num;
+       cputime64_t *time_in_state;
+       unsigned int *freq_table;
+};
+
+struct all_freq_table {
+       unsigned int *freq_table;
+       unsigned int table_size;
+};
+
+static struct all_freq_table *all_freq_table;
+
+static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
 
 struct cpufreq_stats_attribute {
@@ -51,14 +67,24 @@ struct cpufreq_stats_attribute {
 static int cpufreq_stats_update(unsigned int cpu)
 {
        struct cpufreq_stats *stat;
+       struct all_cpufreq_stats *all_stat;
        unsigned long long cur_time;
 
        cur_time = get_jiffies_64();
        spin_lock(&cpufreq_stats_lock);
        stat = per_cpu(cpufreq_stats_table, cpu);
-       if (stat->time_in_state)
+       all_stat = per_cpu(all_cpufreq_stats, cpu);
+       if (!stat) {
+               spin_unlock(&cpufreq_stats_lock);
+               return 0;
+       }
+       if (stat->time_in_state) {
                stat->time_in_state[stat->last_index] +=
                        cur_time - stat->last_time;
+               if (all_stat)
+                       all_stat->time_in_state[stat->last_index] +=
+                                       cur_time - stat->last_time;
+       }
        stat->last_time = cur_time;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
@@ -89,6 +115,62 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
        return len;
 }
 
+static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
+               unsigned int freq)
+{
+       int i;
+       if (!all_stat)
+               return -1;
+       for (i = 0; i < all_stat->state_num; i++) {
+               if (all_stat->freq_table[i] == freq)
+                       return i;
+       }
+       return -1;
+}
+
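+/*
+ * Produces a table like the following (illustrative frequencies and
+ * tick counts; frequencies absent from a CPU's table print as N/A):
+ *
+ * freq		cpu0		cpu1
+ * 300000	12034		9871
+ * 600000	5678		N/A
+ */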
+static ssize_t show_all_time_in_state(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       ssize_t len = 0;
+	unsigned int i, cpu, freq;
+	int index;
+       struct all_cpufreq_stats *all_stat;
+       struct cpufreq_policy *policy;
+
+       len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
+       for_each_possible_cpu(cpu) {
+               len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
+               if (cpu_online(cpu))
+                       cpufreq_stats_update(cpu);
+       }
+
+       if (!all_freq_table)
+               goto out;
+       for (i = 0; i < all_freq_table->table_size; i++) {
+               freq = all_freq_table->freq_table[i];
+               len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
+               for_each_possible_cpu(cpu) {
+                       policy = cpufreq_cpu_get(cpu);
+                       if (policy == NULL)
+                               continue;
+                       all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
+                       index = get_index_all_cpufreq_stat(all_stat, freq);
+                       if (index != -1) {
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
+                                       "%llu\t\t", (unsigned long long)
+                                       cputime64_to_clock_t(all_stat->time_in_state[index]));
+                       } else {
+                               len += scnprintf(buf + len, PAGE_SIZE - len,
+                                               "N/A\t\t");
+                       }
+                       cpufreq_cpu_put(policy);
+               }
+       }
+
+out:
+       len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+       return len;
+}
+
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
@@ -152,6 +234,9 @@ static struct attribute_group stats_attr_group = {
        .name = "stats"
 };
 
+static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
+               0444, show_all_time_in_state, NULL);
+
 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 {
        int index;
@@ -198,6 +283,29 @@ put_ref:
        cpufreq_cpu_put(policy);
 }
 
+static void cpufreq_allstats_free(void)
+{
+       int i;
+       struct all_cpufreq_stats *all_stat;
+
+       sysfs_remove_file(cpufreq_global_kobject,
+                                               &_attr_all_time_in_state.attr);
+
+       for (i = 0; i < total_cpus; i++) {
+               all_stat = per_cpu(all_cpufreq_stats, i);
+               if (!all_stat)
+                       continue;
+               kfree(all_stat->time_in_state);
+               kfree(all_stat);
+               per_cpu(all_cpufreq_stats, i) = NULL;
+       }
+       if (all_freq_table) {
+               kfree(all_freq_table->freq_table);
+               kfree(all_freq_table);
+               all_freq_table = NULL;
+       }
+}
+
 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table)
 {
@@ -284,6 +392,106 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
        stat->cpu = policy->cpu;
 }
 
+static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
+{
+       unsigned int lhs = *(const unsigned int *)(lhs_ptr);
+       unsigned int rhs = *(const unsigned int *)(rhs_ptr);
+       if (lhs < rhs)
+               return -1;
+       if (lhs > rhs)
+               return 1;
+       return 0;
+}
+
+static bool check_all_freq_table(unsigned int freq)
+{
+       int i;
+       for (i = 0; i < all_freq_table->table_size; i++) {
+               if (freq == all_freq_table->freq_table[i])
+                       return true;
+       }
+       return false;
+}
+
+static void create_all_freq_table(void)
+{
+	all_freq_table = kzalloc(sizeof(struct all_freq_table), GFP_KERNEL);
+	if (!all_freq_table)
+		pr_warn("could not allocate memory for all_freq_table\n");
+}
+
+static void add_all_freq_table(unsigned int freq)
+{
+	unsigned int size;
+	unsigned int *table;
+
+	size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
+	/*
+	 * krealloc() returns NULL on failure (not an ERR_PTR) and leaves
+	 * the original allocation intact, so check the result before
+	 * overwriting the old pointer.
+	 */
+	table = krealloc(all_freq_table->freq_table, size, GFP_KERNEL);
+	if (!table) {
+		pr_warn("Could not reallocate memory for freq_table\n");
+		return;
+	}
+	all_freq_table->freq_table = table;
+	all_freq_table->freq_table[all_freq_table->table_size++] = freq;
+}
+
+static void cpufreq_allstats_create(unsigned int cpu)
+{
+	int i, j = 0;
+       unsigned int alloc_size, count = 0;
+       struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
+       struct all_cpufreq_stats *all_stat;
+       bool sort_needed = false;
+
+       if (!table)
+               return;
+
+       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+               unsigned int freq = table[i].frequency;
+               if (freq == CPUFREQ_ENTRY_INVALID)
+                       continue;
+               count++;
+       }
+
+       all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
+                       GFP_KERNEL);
+       if (!all_stat) {
+               pr_warn("Cannot allocate memory for cpufreq stats\n");
+               return;
+       }
+
+	/*
+	 * Allocate memory for the per-cpu freq table as well as the clock
+	 * ticks spent at each frequency.
+	 */
+       alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
+       all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+       if (!all_stat->time_in_state) {
+               pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
+               kfree(all_stat);
+               all_stat = NULL;
+               return;
+       }
+       all_stat->freq_table = (unsigned int *)
+               (all_stat->time_in_state + count);
+
+       spin_lock(&cpufreq_stats_lock);
+       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+               unsigned int freq = table[i].frequency;
+               if (freq == CPUFREQ_ENTRY_INVALID)
+                       continue;
+               all_stat->freq_table[j++] = freq;
+               if (all_freq_table && !check_all_freq_table(freq)) {
+                       add_all_freq_table(freq);
+                       sort_needed = true;
+               }
+       }
+       if (sort_needed)
+               sort(all_freq_table->freq_table, all_freq_table->table_size,
+                               sizeof(unsigned int), &compare_for_sort, NULL);
+       all_stat->state_num = j;
+       per_cpu(all_cpufreq_stats, cpu) = all_stat;
+       spin_unlock(&cpufreq_stats_lock);
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
 {
@@ -302,6 +510,10 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                return 0;
+
+       if (!per_cpu(all_cpufreq_stats, cpu))
+               cpufreq_allstats_create(cpu);
+
        ret = cpufreq_stats_create_table(policy, table);
        if (ret)
                return ret;
@@ -344,6 +556,30 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
        return 0;
 }
 
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *table;
+       int ret = -ENODEV;
+
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy)
+               return -ENODEV;
+
+       table = cpufreq_frequency_get_table(cpu);
+       if (!table)
+               goto out;
+
+       if (!per_cpu(all_cpufreq_stats, cpu))
+               cpufreq_allstats_create(cpu);
+
+       ret = cpufreq_stats_create_table(policy, table);
+
+out:
+       cpufreq_cpu_put(policy);
+       return ret;
+}
+
 static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
                                               unsigned long action,
                                               void *hcpu)
@@ -363,6 +599,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               cpufreq_stats_create_table_cpu(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
@@ -407,6 +647,12 @@ static int cpufreq_stats_setup(void)
                return ret;
        }
 
+       create_all_freq_table();
+       ret = sysfs_create_file(cpufreq_global_kobject,
+                       &_attr_all_time_in_state.attr);
+       if (ret)
+               pr_warn("Error creating sysfs file for cpufreq stats\n");
+
        return 0;
 }
 
@@ -423,6 +669,7 @@ static void cpufreq_stats_cleanup(void)
                cpufreq_stats_free_table(cpu);
                cpufreq_stats_free_sysfs(cpu);
        }
+       cpufreq_allstats_free();
 }
 
 #ifdef CONFIG_BL_SWITCHER
index bc580b67a65298a8bf65e699d562dfd6f237b697..33305fb3d5fcd7c06a1af2d9c862d70c47382e30 100644 (file)
@@ -173,7 +173,12 @@ static inline int performance_multiplier(void)
 
        /* for higher loadavg, we are more reluctant */
 
-       mult += 2 * get_loadavg();
+	/*
+	 * This doesn't work as intended: it is almost always 0, but can
+	 * sometimes, depending on workload, spike very high into the hundreds
+	 * even when the average cpu load is under 10%.
+	 */
+	/* mult += 2 * get_loadavg(); */
 
        /* for IO wait tasks (per cpu!) we add 5x each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());
index c2534d62911cfd18434c9b2bb172cad265603e0d..a35c5b932eba6c21606e1d8c378f1d0a6fa0a53c 100644 (file)
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
  */
 static int desc_to_gpio(const struct gpio_desc *desc)
 {
-       return desc->chip->base + gpio_chip_hwgpio(desc);
+       return desc - &gpio_desc[0];
 }
 
 
@@ -1214,15 +1214,14 @@ int gpiochip_add(struct gpio_chip *chip)
                }
        }
 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
 #ifdef CONFIG_PINCTRL
        INIT_LIST_HEAD(&chip->pin_ranges);
 #endif
 
        of_gpiochip_add(chip);
 
-unlock:
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
        if (status)
                goto fail;
 
@@ -1235,6 +1234,9 @@ unlock:
                chip->label ? : "generic");
 
        return 0;
+
+unlock:
+       spin_unlock_irqrestore(&gpio_lock, flags);
 fail:
        /* failures here can mean systems won't boot... */
        pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
index 8453214ec3767d6c55aefe2bb99466d426904403..941ab3c287ec8f944712679d4c9f2c9b74007d5f 100644 (file)
@@ -768,6 +768,8 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_ALTERASE] = "AlternateErase",      [KEY_CANCEL] = "Cancel",
        [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
        [KEY_MEDIA] = "Media",                  [KEY_UNKNOWN] = "Unknown",
+       [BTN_DPAD_UP] = "BtnDPadUp",            [BTN_DPAD_DOWN] = "BtnDPadDown",
+       [BTN_DPAD_LEFT] = "BtnDPadLeft",        [BTN_DPAD_RIGHT] = "BtnDPadRight",
        [BTN_0] = "Btn0",                       [BTN_1] = "Btn1",
        [BTN_2] = "Btn2",                       [BTN_3] = "Btn3",
        [BTN_4] = "Btn4",                       [BTN_5] = "Btn5",
@@ -797,7 +799,8 @@ static const char *keys[KEY_MAX + 1] = {
        [BTN_TOOL_MOUSE] = "ToolMouse",         [BTN_TOOL_LENS] = "ToolLens",
        [BTN_TOUCH] = "Touch",                  [BTN_STYLUS] = "Stylus",
        [BTN_STYLUS2] = "Stylus2",              [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
-       [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
+       [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap",
+       [BTN_GEAR_DOWN] = "WheelBtn",
        [BTN_GEAR_UP] = "Gear up",              [KEY_OK] = "Ok",
        [KEY_SELECT] = "Select",                [KEY_GOTO] = "Goto",
        [KEY_CLEAR] = "Clear",                  [KEY_POWER2] = "Power2",
@@ -852,6 +855,16 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
        [KEY_KBDILLUMUP] = "KbdIlluminationUp",
        [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
+       [KEY_BUTTONCONFIG] = "ButtonConfig",
+       [KEY_TASKMANAGER] = "TaskManager",
+       [KEY_JOURNAL] = "Journal",
+       [KEY_CONTROLPANEL] = "ControlPanel",
+       [KEY_APPSELECT] = "AppSelect",
+       [KEY_SCREENSAVER] = "ScreenSaver",
+       [KEY_VOICECOMMAND] = "VoiceCommand",
+       [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+       [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+       [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
 };
 
 static const char *relatives[REL_MAX + 1] = {
index 012880a2228c73c1f3e6a89b02598d6d7ef3843d..0f9950e8239a75b9bf22d1d22f6cc3e0e295cc77 100644 (file)
@@ -726,6 +726,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x06c: map_key_clear(KEY_YELLOW);          break;
                case 0x06d: map_key_clear(KEY_ZOOM);            break;
 
+               case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);            break;
+               case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);          break;
+               case 0x072: map_key_clear(KEY_BRIGHTNESS_TOGGLE);       break;
+               case 0x073: map_key_clear(KEY_BRIGHTNESS_MIN);          break;
+               case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);          break;
+               case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);         break;
+
                case 0x082: map_key_clear(KEY_VIDEO_NEXT);      break;
                case 0x083: map_key_clear(KEY_LAST);            break;
                case 0x084: map_key_clear(KEY_ENTER);           break;
@@ -766,6 +773,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x0bf: map_key_clear(KEY_SLOW);            break;
 
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
+               case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
                case 0x0e2: map_key_clear(KEY_MUTE);            break;
                case 0x0e5: map_key_clear(KEY_BASSBOOST);       break;
@@ -773,6 +781,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x0ea: map_key_clear(KEY_VOLUMEDOWN);      break;
                case 0x0f5: map_key_clear(KEY_SLOW);            break;
 
+               case 0x181: map_key_clear(KEY_BUTTONCONFIG);    break;
                case 0x182: map_key_clear(KEY_BOOKMARKS);       break;
                case 0x183: map_key_clear(KEY_CONFIG);          break;
                case 0x184: map_key_clear(KEY_WORDPROCESSOR);   break;
@@ -786,6 +795,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x18c: map_key_clear(KEY_VOICEMAIL);       break;
                case 0x18d: map_key_clear(KEY_ADDRESSBOOK);     break;
                case 0x18e: map_key_clear(KEY_CALENDAR);        break;
+               case 0x18f: map_key_clear(KEY_TASKMANAGER);     break;
+               case 0x190: map_key_clear(KEY_JOURNAL);         break;
                case 0x191: map_key_clear(KEY_FINANCE);         break;
                case 0x192: map_key_clear(KEY_CALC);            break;
                case 0x193: map_key_clear(KEY_PLAYER);          break;
@@ -794,10 +805,16 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x199: map_key_clear(KEY_CHAT);            break;
                case 0x19c: map_key_clear(KEY_LOGOFF);          break;
                case 0x19e: map_key_clear(KEY_COFFEE);          break;
+               case 0x19f: map_key_clear(KEY_CONTROLPANEL);            break;
+               case 0x1a2: map_key_clear(KEY_APPSELECT);               break;
+               case 0x1a3: map_key_clear(KEY_NEXT);            break;
+               case 0x1a4: map_key_clear(KEY_PREVIOUS);        break;
                case 0x1a6: map_key_clear(KEY_HELP);            break;
                case 0x1a7: map_key_clear(KEY_DOCUMENTS);       break;
                case 0x1ab: map_key_clear(KEY_SPELLCHECK);      break;
                case 0x1ae: map_key_clear(KEY_KEYBOARD);        break;
+               case 0x1b1: map_key_clear(KEY_SCREENSAVER);             break;
+               case 0x1b4: map_key_clear(KEY_FILE);            break;
                case 0x1b6: map_key_clear(KEY_IMAGES);          break;
                case 0x1b7: map_key_clear(KEY_AUDIO);           break;
                case 0x1b8: map_key_clear(KEY_VIDEO);           break;
@@ -1338,8 +1355,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
                                 * UGCI) cram a lot of unrelated inputs into the
                                 * same interface. */
                                hidinput->report = report;
-                               if (drv->input_configured)
-                                       drv->input_configured(hid, hidinput);
+                               if (drv->input_configured &&
+                                   drv->input_configured(hid, hidinput))
+                                       goto out_cleanup;
                                if (input_register_device(hidinput->input))
                                        goto out_cleanup;
                                hidinput = NULL;
@@ -1360,8 +1378,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
        }
 
        if (hidinput) {
-               if (drv->input_configured)
-                       drv->input_configured(hid, hidinput);
+               if (drv->input_configured &&
+                   drv->input_configured(hid, hidinput))
+                       goto out_cleanup;
                if (input_register_device(hidinput->input))
                        goto out_cleanup;
        }
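
With this change a driver's input_configured callback can veto registration: a nonzero return now makes hidinput_connect() unwind through out_cleanup instead of registering a half-configured input device (hid-multitouch below uses this to propagate input_mt_init_slots() failures). A minimal sketch of the new callback shape for a hypothetical driver; my_setup_params() is an assumed helper, not part of this patch:

    #include <linux/hid.h>

    static int my_input_configured(struct hid_device *hdev,
                                   struct hid_input *hi)
    {
            int ret;

            /* A nonzero return now aborts hidinput_connect() cleanly
             * instead of registering a half-configured device. */
            ret = my_setup_params(hi->input);
            if (ret)
                    return ret;
            return 0;
    }
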
index 3d8e58ac7499e4334c0ac0931afb1bfaf675cdeb..fb9ac1266719ea0ef22669e49ebd762c6dace115 100644 (file)
@@ -443,6 +443,16 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
            (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
                td->mt_flags |= INPUT_MT_POINTER;
 
+       /* Only map fields from TouchScreen or TouchPad collections.
+        * We need to ignore fields that belong to other collections
+        * such as Mouse that might have the same GenericDesktop usages. */
+       if (field->application == HID_DG_TOUCHSCREEN)
+               set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+       else if (field->application == HID_DG_TOUCHPAD)
+               set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+       else
+               return 0;
+
        if (usage->usage_index)
                prev_usage = &field->usage[usage->usage_index - 1];
 
@@ -772,12 +782,13 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
                mt_sync_frame(td, report->field[0]->hidinput->input);
 }
 
-static void mt_touch_input_configured(struct hid_device *hdev,
+static int mt_touch_input_configured(struct hid_device *hdev,
                                        struct hid_input *hi)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
        struct input_dev *input = hi->input;
+       int ret;
 
        if (!td->maxcontacts)
                td->maxcontacts = MT_DEFAULT_MAXCONTACT;
@@ -792,9 +803,12 @@ static void mt_touch_input_configured(struct hid_device *hdev,
        if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
                td->mt_flags |= INPUT_MT_DROP_UNUSED;
 
-       input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+       ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
+       if (ret)
+               return ret;
 
        td->mt_flags = 0;
+       return 0;
 }
 
 static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -927,19 +941,21 @@ static void mt_post_parse(struct mt_device *td)
                cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
 }
 
-static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        char *name = kstrdup(hdev->name, GFP_KERNEL);
+       int ret = 0;
 
        if (name)
                hi->input->name = name;
 
        if (hi->report->id == td->mt_report_id)
-               mt_touch_input_configured(hdev, hi);
+               ret = mt_touch_input_configured(hdev, hi);
 
        if (hi->report->id == td->pen_report_id)
                mt_pen_input_configured(hdev, hi);
+       return ret;
 }
 
 static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
index 10aa9ef86cece1b80fa09c57d2a52e344e4dfc12..145c98617936ac979cf3ff7e417e4f1412a1e289 100644 (file)
@@ -35,6 +35,7 @@
  */
 struct iio_event_interface {
        wait_queue_head_t       wait;
+       struct mutex            read_lock;
        DECLARE_KFIFO(det_events, struct iio_event_data, 16);
 
        struct list_head        dev_attr_list;
@@ -97,14 +98,16 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
        if (count < sizeof(struct iio_event_data))
                return -EINVAL;
 
-       spin_lock_irq(&ev_int->wait.lock);
+       if (mutex_lock_interruptible(&ev_int->read_lock))
+               return -ERESTARTSYS;
+
        if (kfifo_is_empty(&ev_int->det_events)) {
                if (filep->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto error_unlock;
                }
                /* Blocking on device; waiting for something to be there */
-               ret = wait_event_interruptible_locked_irq(ev_int->wait,
+               ret = wait_event_interruptible(ev_int->wait,
                                        !kfifo_is_empty(&ev_int->det_events));
                if (ret)
                        goto error_unlock;
@@ -114,7 +117,7 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
        ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
 
 error_unlock:
-       spin_unlock_irq(&ev_int->wait.lock);
+       mutex_unlock(&ev_int->read_lock);
 
        return ret ? ret : copied;
 }
@@ -371,6 +374,7 @@ static void iio_setup_ev_int(struct iio_event_interface *ev_int)
 {
        INIT_KFIFO(ev_int->det_events);
        init_waitqueue_head(&ev_int->wait);
+       mutex_init(&ev_int->read_lock);
 }
 
 static const char *iio_event_group_name = "events";
@@ -434,6 +438,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
 
 error_free_setup_event_lines:
        __iio_remove_event_config_attrs(indio_dev);
+       mutex_destroy(&indio_dev->event_interface->read_lock);
        kfree(indio_dev->event_interface);
 error_ret:
 
@@ -446,5 +451,6 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev)
                return;
        __iio_remove_event_config_attrs(indio_dev);
        kfree(indio_dev->event_interface->group.attrs);
+       mutex_destroy(&indio_dev->event_interface->read_lock);
        kfree(indio_dev->event_interface);
 }
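
From userspace the chrdev behaves as before; only the kernel-side serialization changed (readers now sleep on the new read_lock mutex plus the waitqueue instead of the waitqueue's spinlock). A hedged userspace sketch of draining events, assuming the uapi header linux/iio/events.h of this kernel era and a placeholder device path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/iio/events.h>

    int main(void)
    {
            struct iio_event_data ev;
            int event_fd;
            int fd = open("/dev/iio:device0", O_RDONLY);  /* assumed path */

            if (fd < 0 || ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd) < 0)
                    return 1;
            /* read() blocks until an event is queued, unless O_NONBLOCK
             * was set, in which case it fails with -EAGAIN as above. */
            while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
                    printf("event id %llx at %lld\n",
                           (unsigned long long)ev.id, (long long)ev.timestamp);
            return 0;
    }
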
index a11ff74a5127019cb6f367d07ffae32429ccf37e..518efa2a9f526d874c837b78eeffe21160aa03de 100644 (file)
@@ -174,6 +174,25 @@ config INPUT_APMPOWER
          To compile this driver as a module, choose M here: the
          module will be called apm-power.
 
+config INPUT_KEYRESET
+       tristate "Reset key"
+       depends on INPUT
+       select INPUT_KEYCOMBO
+       ---help---
+         Say Y here if you want to reboot when certain keys are pressed.
+
+         To compile this driver as a module, choose M here: the
+         module will be called keyreset.
+
+config INPUT_KEYCOMBO
+       tristate "Key combo"
+       depends on INPUT
+       ---help---
+         Say Y here if you want to take an action when certain keys are pressed.
+
+         To compile this driver as a module, choose M here: the
+         module will be called keycombo.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
index 5ca3f631497f4d8295cf42a1d2bf54ed9c4f9e16..ee4c06520bb436a12c4d80f26a0df277b63652e9 100644 (file)
@@ -25,3 +25,6 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN)       += touchscreen/
 obj-$(CONFIG_INPUT_MISC)       += misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)   += apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)   += keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)   += keycombo.o
+
index c122dd2adc22bd16985cfea673132a4bff5b5c4c..f4897c8c15005ed943686e12ca3f022f24396933 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/major.h>
 #include <linux/device.h>
 #include <linux/cdev.h>
+#include <linux/wakelock.h>
 #include "input-compat.h"
 
 struct evdev {
@@ -46,6 +47,9 @@ struct evdev_client {
        unsigned int tail;
        unsigned int packet_head; /* [future] position of the first element of next packet */
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+       struct wake_lock wake_lock;
+       bool use_wake_lock;
+       char name[28];
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
@@ -73,10 +77,14 @@ static void __pass_event(struct evdev_client *client,
                client->buffer[client->tail].value = 0;
 
                client->packet_head = client->tail;
+               if (client->use_wake_lock)
+                       wake_unlock(&client->wake_lock);
        }
 
        if (event->type == EV_SYN && event->code == SYN_REPORT) {
                client->packet_head = client->head;
+               if (client->use_wake_lock)
+                       wake_lock(&client->wake_lock);
                kill_fasync(&client->fasync, SIGIO, POLL_IN);
        }
 }
@@ -291,6 +299,8 @@ static int evdev_release(struct inode *inode, struct file *file)
        mutex_unlock(&evdev->mutex);
 
        evdev_detach_client(evdev, client);
+       if (client->use_wake_lock)
+               wake_lock_destroy(&client->wake_lock);
 
        if (is_vmalloc_addr(client))
                vfree(client);
@@ -328,6 +338,8 @@ static int evdev_open(struct inode *inode, struct file *file)
 
        client->bufsize = bufsize;
        spin_lock_init(&client->buffer_lock);
+       snprintf(client->name, sizeof(client->name), "%s-%d",
+                       dev_name(&evdev->dev), task_tgid_vnr(current));
        client->evdev = evdev;
        evdev_attach_client(evdev, client);
 
@@ -394,6 +406,9 @@ static int evdev_fetch_next_event(struct evdev_client *client,
        if (have_event) {
                *event = client->buffer[client->tail++];
                client->tail &= client->bufsize - 1;
+               if (client->use_wake_lock &&
+                   client->packet_head == client->tail)
+                       wake_unlock(&client->wake_lock);
        }
 
        spin_unlock_irq(&client->buffer_lock);
@@ -682,6 +697,35 @@ static int evdev_handle_mt_request(struct input_dev *dev,
        return 0;
 }
 
+static int evdev_enable_suspend_block(struct evdev *evdev,
+                                     struct evdev_client *client)
+{
+       if (client->use_wake_lock)
+               return 0;
+
+       spin_lock_irq(&client->buffer_lock);
+       wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
+       client->use_wake_lock = true;
+       if (client->packet_head != client->tail)
+               wake_lock(&client->wake_lock);
+       spin_unlock_irq(&client->buffer_lock);
+       return 0;
+}
+
+static int evdev_disable_suspend_block(struct evdev *evdev,
+                                      struct evdev_client *client)
+{
+       if (!client->use_wake_lock)
+               return 0;
+
+       spin_lock_irq(&client->buffer_lock);
+       client->use_wake_lock = false;
+       wake_lock_destroy(&client->wake_lock);
+       spin_unlock_irq(&client->buffer_lock);
+
+       return 0;
+}
+
 static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                           void __user *p, int compat_mode)
 {
@@ -763,6 +807,15 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
 
        case EVIOCSKEYCODE_V2:
                return evdev_handle_set_keycode_v2(dev, p);
+
+       case EVIOCGSUSPENDBLOCK:
+               return put_user(client->use_wake_lock, ip);
+
+       case EVIOCSSUSPENDBLOCK:
+               if (p)
+                       return evdev_enable_suspend_block(evdev, client);
+               else
+                       return evdev_disable_suspend_block(evdev, client);
        }
 
        size = _IOC_SIZE(cmd);
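
The two new ioctls let an event reader opt into the per-client wake lock added above: while a complete packet sits in the client buffer the kernel holds client->wake_lock, so the system cannot suspend before userspace drains the queue. A hedged userspace sketch; the ioctl definitions come from the patched uapi input header, and the device node path is the caller's:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/input.h>

    /* Returns an evdev fd whose queued events hold a wake lock. */
    static int open_wake_blocking(const char *node)
    {
            int enabled = 0;
            int fd = open(node, O_RDONLY);

            if (fd < 0)
                    return -1;
            /* A nonzero argument enables the per-client wake lock; zero
             * disables it (and destroys the lock), per the switch above. */
            if (ioctl(fd, EVIOCSSUSPENDBLOCK, 1) < 0 ||
                ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled) < 0 || !enabled) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
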
diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c
new file mode 100644 (file)
index 0000000..2fba451
--- /dev/null
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+struct keycombo_state {
+       struct input_handler input_handler;
+       unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+       unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+       unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+       spinlock_t lock;
+       struct workqueue_struct *wq;
+       int key_down_target;
+       int key_down;
+       int key_up;
+       struct delayed_work key_down_work;
+       int delay;
+       struct work_struct key_up_work;
+       void (*key_up_fn)(void *);
+       void (*key_down_fn)(void *);
+       void *priv;
+       int key_is_down;
+       struct wakeup_source combo_held_wake_source;
+       struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+       struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                                       work);
+       struct keycombo_state *state = container_of(dwork,
+                                       struct keycombo_state, key_down_work);
+       if (state->key_down_fn)
+               state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+       struct keycombo_state *state = container_of(work, struct keycombo_state,
+                                                               key_up_work);
+       if (state->key_up_fn)
+               state->key_up_fn(state->priv);
+       __pm_relax(&state->combo_up_wake_source);
+}
+
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+               unsigned int code, int value)
+{
+       unsigned long flags;
+       struct keycombo_state *state = handle->private;
+
+       if (type != EV_KEY)
+               return;
+
+       if (code >= KEY_MAX)
+               return;
+
+       if (!test_bit(code, state->keybit))
+               return;
+
+       spin_lock_irqsave(&state->lock, flags);
+       if (!test_bit(code, state->key) == !value)
+               goto done;
+       __change_bit(code, state->key);
+       if (test_bit(code, state->upbit)) {
+               if (value)
+                       state->key_up++;
+               else
+                       state->key_up--;
+       } else {
+               if (value)
+                       state->key_down++;
+               else
+                       state->key_down--;
+       }
+       if (state->key_down == state->key_down_target && state->key_up == 0) {
+               __pm_stay_awake(&state->combo_held_wake_source);
+               state->key_is_down = 1;
+               if (!queue_delayed_work(state->wq, &state->key_down_work,
+                                                               state->delay))
+                       pr_debug("Key down work already queued!\n");
+       } else if (state->key_is_down) {
+               if (!cancel_delayed_work(&state->key_down_work)) {
+                       __pm_stay_awake(&state->combo_up_wake_source);
+                       queue_work(state->wq, &state->key_up_work);
+               }
+               __pm_relax(&state->combo_held_wake_source);
+               state->key_is_down = 0;
+       }
+done:
+       spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+               struct input_dev *dev,
+               const struct input_device_id *id)
+{
+       int i;
+       int ret;
+       struct input_handle *handle;
+       struct keycombo_state *state =
+               container_of(handler, struct keycombo_state, input_handler);
+       for (i = 0; i < KEY_MAX; i++) {
+               if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+                       break;
+       }
+       if (i == KEY_MAX)
+               return -ENODEV;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = KEYCOMBO_NAME;
+       handle->private = state;
+
+       ret = input_register_handle(handle);
+       if (ret)
+               goto err_input_register_handle;
+
+       ret = input_open_device(handle);
+       if (ret)
+               goto err_input_open_device;
+
+       return 0;
+
+err_input_open_device:
+       input_unregister_handle(handle);
+err_input_register_handle:
+       kfree(handle);
+       return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+               {
+                               .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+                               .evbit = { BIT_MASK(EV_KEY) },
+               },
+               { },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+       int ret;
+       int key, *keyp;
+       struct keycombo_state *state;
+       struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+       if (!pdata)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       spin_lock_init(&state->lock);
+       keyp = pdata->keys_down;
+       while ((key = *keyp++)) {
+               if (key >= KEY_MAX)
+                       continue;
+               state->key_down_target++;
+               __set_bit(key, state->keybit);
+       }
+       if (pdata->keys_up) {
+               keyp = pdata->keys_up;
+               while ((key = *keyp++)) {
+                       if (key >= KEY_MAX)
+                               continue;
+                       __set_bit(key, state->keybit);
+                       __set_bit(key, state->upbit);
+               }
+       }
+
+       state->wq = alloc_ordered_workqueue("keycombo", 0);
+       if (!state->wq) {
+               kfree(state);
+               return -ENOMEM;
+       }
+
+       state->priv = pdata->priv;
+
+       if (pdata->key_down_fn)
+               state->key_down_fn = pdata->key_down_fn;
+       INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+       if (pdata->key_up_fn)
+               state->key_up_fn = pdata->key_up_fn;
+       INIT_WORK(&state->key_up_work, do_key_up);
+
+       wakeup_source_init(&state->combo_held_wake_source, "key combo");
+       wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+       state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+       state->input_handler.event = keycombo_event;
+       state->input_handler.connect = keycombo_connect;
+       state->input_handler.disconnect = keycombo_disconnect;
+       state->input_handler.name = KEYCOMBO_NAME;
+       state->input_handler.id_table = keycombo_ids;
+       ret = input_register_handler(&state->input_handler);
+       if (ret) {
+               kfree(state);
+               return ret;
+       }
+       platform_set_drvdata(pdev, state);
+       return 0;
+}
+
+int keycombo_remove(struct platform_device *pdev)
+{
+       struct keycombo_state *state = platform_get_drvdata(pdev);
+       input_unregister_handler(&state->input_handler);
+       destroy_workqueue(state->wq);
+       kfree(state);
+       return 0;
+}
+
+
+struct platform_driver keycombo_driver = {
+               .driver.name = KEYCOMBO_NAME,
+               .probe = keycombo_probe,
+               .remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+       return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+       return platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
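
keycombo consumes zero-terminated keycode lists and calls key_down_fn once all keys_down are held (and no keys_up) for key_down_delay ms, from an ordered workqueue. A hedged sketch of registering one combo; the struct layout is inferred from this file and from keyreset.c below (keys_down is the trailing zero-terminated array accounted for in the allocation size), and my_combo_fn is an assumed callback:

    #include <linux/input.h>
    #include <linux/keycombo.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static void my_combo_fn(void *priv)
    {
            pr_info("combo held\n");
    }

    static int register_my_combo(struct device *parent)
    {
            int keys[] = { KEY_VOLUMEDOWN, KEY_POWER, 0 };
            size_t size = sizeof(struct keycombo_platform_data) + sizeof(keys);
            struct keycombo_platform_data *pdata = kzalloc(size, GFP_KERNEL);
            struct platform_device *pdev;
            int ret;

            if (!pdata)
                    return -ENOMEM;
            memcpy(pdata->keys_down, keys, sizeof(keys));
            pdata->key_down_fn = my_combo_fn;
            pdata->key_down_delay = 2000;   /* ms, per msecs_to_jiffies above */

            pdev = platform_device_alloc(KEYCOMBO_NAME, PLATFORM_DEVID_AUTO);
            if (!pdev) {
                    kfree(pdata);
                    return -ENOMEM;
            }
            pdev->dev.parent = parent;
            ret = platform_device_add_data(pdev, pdata, size);
            kfree(pdata);   /* platform_device_add_data() copies */
            if (!ret)
                    ret = platform_device_add(pdev);
            if (ret)
                    platform_device_put(pdev);
            return ret;
    }
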
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644 (file)
index 0000000..eaaccde
--- /dev/null
@@ -0,0 +1,143 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/keycombo.h>
+
+struct keyreset_state {
+       int restart_requested;
+       int (*reset_fn)(void);
+       struct platform_device *pdev_child;
+};
+
+static void do_restart(void)
+{
+       sys_sync();
+       kernel_restart(NULL);
+}
+
+static void do_reset_fn(void *priv)
+{
+       struct keyreset_state *state = priv;
+       if (state->restart_requested)
+               panic("keyboard reset failed, %d", state->restart_requested);
+       if (state->reset_fn) {
+               state->restart_requested = state->reset_fn();
+       } else {
+               pr_info("keyboard reset\n");
+               do_restart();
+               state->restart_requested = 1;
+       }
+}
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+       int ret = -ENOMEM;
+       struct keycombo_platform_data *pdata_child;
+       struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+       int up_size = 0, down_size = 0, size;
+       int key, *keyp;
+       struct keyreset_state *state;
+
+       if (!pdata)
+               return -EINVAL;
+       state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+
+       state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+                                                       PLATFORM_DEVID_AUTO);
+       if (!state->pdev_child)
+               return -ENOMEM;
+       state->pdev_child->dev.parent = &pdev->dev;
+
+       keyp = pdata->keys_down;
+       while ((key = *keyp++)) {
+               if (key >= KEY_MAX)
+                       continue;
+               down_size++;
+       }
+       if (pdata->keys_up) {
+               keyp = pdata->keys_up;
+               while ((key = *keyp++)) {
+                       if (key >= KEY_MAX)
+                               continue;
+                       up_size++;
+               }
+       }
+       size = sizeof(struct keycombo_platform_data)
+                       + sizeof(int) * (down_size + 1);
+       pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+       if (!pdata_child)
+               goto error;
+       memcpy(pdata_child->keys_down, pdata->keys_down,
+                                               sizeof(int) * down_size);
+       if (up_size > 0) {
+               pdata_child->keys_up = devm_kzalloc(&pdev->dev,
+                               sizeof(int) * (up_size + 1), GFP_KERNEL);
+               if (!pdata_child->keys_up)
+                       goto error;
+               memcpy(pdata_child->keys_up, pdata->keys_up,
+                                                       sizeof(int) * up_size);
+       }
+       state->reset_fn = pdata->reset_fn;
+       pdata_child->key_down_fn = do_reset_fn;
+       pdata_child->priv = state;
+       pdata_child->key_down_delay = pdata->key_down_delay;
+       ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+       if (ret)
+               goto error;
+       platform_set_drvdata(pdev, state);
+       return platform_device_add(state->pdev_child);
+error:
+       platform_device_put(state->pdev_child);
+       return ret;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+       struct keyreset_state *state = platform_get_drvdata(pdev);
+       platform_device_put(state->pdev_child);
+       return 0;
+}
+
+
+struct platform_driver keyreset_driver = {
+       .driver.name = KEYRESET_NAME,
+       .probe = keyreset_probe,
+       .remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+       return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+       return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
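
keyreset itself is a thin wrapper that instantiates a keycombo child whose key_down_fn syncs and restarts the machine (or calls a board-supplied reset_fn). A hedged board-file sketch wiring it up; field names follow the probe above, and the keycodes are examples:

    #include <linux/input.h>
    #include <linux/keyreset.h>
    #include <linux/platform_device.h>

    static int reset_keys[] = { KEY_POWER, KEY_VOLUMEDOWN, 0 };

    static struct keyreset_platform_data reset_pdata = {
            .keys_down = reset_keys,
            .key_down_delay = 3000,         /* ms */
            /* .reset_fn left NULL: falls through to sys_sync() +
             * kernel_restart() in do_reset_fn() above */
    };

    static struct platform_device reset_device = {
            .name = KEYRESET_NAME,
            .id = -1,
            .dev = { .platform_data = &reset_pdata },
    };
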
index bb698e1f9e429b1c5d6dee2b59f0603807918d84..4abf046e30b17e8dab7e3c49ee674fbc4960fa96 100644 (file)
@@ -299,6 +299,17 @@ config INPUT_ATI_REMOTE2
          To compile this driver as a module, choose M here: the module will be
          called ati_remote2.
 
+config INPUT_KEYCHORD
+       tristate "Key chord input driver support"
+       help
+         Say Y here if you want to enable the key chord driver
+         accessible at /dev/keychord.  This driver can be used
+         for receiving notifications when client-specified key
+         combinations are pressed.
+
+         To compile this driver as a module, choose M here: the
+         module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
        tristate "Keyspan DMR USB remote control"
        depends on USB_ARCH_HAS_HCD
@@ -434,6 +445,11 @@ config INPUT_SGI_BTNS
          To compile this driver as a module, choose M here: the
          module will be called sgi_btns.
 
+config INPUT_GPIO
+       tristate "GPIO driver support"
+       help
+         Say Y here if you want to support gpio-based keys, wheels, etc.
+
 config HP_SDC_RTC
        tristate "HP SDC Real Time Clock"
        depends on (GSC || HP300) && SERIO
index d7fc17f11d77aa70fdac018aa146e126d1e65e8c..6b0e8a677725a88e2aeb8354a6acd56248023699 100644 (file)
@@ -28,9 +28,11 @@ obj-$(CONFIG_INPUT_DA9055_ONKEY)     += da9055_onkey.o
 obj-$(CONFIG_INPUT_DM355EVM)           += dm355evm_keys.o
 obj-$(CONFIG_INPUT_GP2A)               += gp2ap002a00f.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)   += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_GPIO)               += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
 obj-$(CONFIG_HP_SDC_RTC)               += hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)            += ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)      += ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)           += keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)     += keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)              += kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)          += m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644 (file)
index 0000000..0acf4a5
--- /dev/null
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+       struct gpio_event_input_devs *input_devs;
+       struct gpio_event_axis_info *info;
+       uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+       [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+       [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+       [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+       [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+       [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+       [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+       [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+       [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+       return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+       [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+       [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+       [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+       [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+       [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+       [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+       [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+       [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+       [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+       [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+       struct gpio_event_axis_info *info, uint16_t in)
+{
+       return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+       struct gpio_event_axis_info *ai = as->info;
+       int i;
+       int change;
+       uint16_t state = 0;
+       uint16_t pos;
+       uint16_t old_pos = as->pos;
+       for (i = ai->count - 1; i >= 0; i--)
+               state = (state << 1) | gpio_get_value(ai->gpio[i]);
+       pos = ai->map(ai, state);
+       if (ai->flags & GPIOEAF_PRINT_RAW)
+               pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+                       ai->type, ai->code, state, old_pos, pos);
+       if (report && pos != old_pos) {
+               if (ai->type == EV_REL) {
+                       change = (ai->decoded_size + pos - old_pos) %
+                                 ai->decoded_size;
+                       if (change > ai->decoded_size / 2)
+                               change -= ai->decoded_size;
+                       if (change == ai->decoded_size / 2) {
+                               if (ai->flags & GPIOEAF_PRINT_EVENT)
+                                       pr_info("axis %d-%d unknown direction, "
+                                               "pos %d -> %d\n", ai->type,
+                                               ai->code, old_pos, pos);
+                               change = 0; /* no closest direction */
+                       }
+                       if (ai->flags & GPIOEAF_PRINT_EVENT)
+                               pr_info("axis %d-%d change %d\n",
+                                       ai->type, ai->code, change);
+                       input_report_rel(as->input_devs->dev[ai->dev],
+                                               ai->code, change);
+               } else {
+                       if (ai->flags & GPIOEAF_PRINT_EVENT)
+                               pr_info("axis %d-%d now %d\n",
+                                       ai->type, ai->code, pos);
+                       input_event(as->input_devs->dev[ai->dev],
+                                       ai->type, ai->code, pos);
+               }
+               input_sync(as->input_devs->dev[ai->dev]);
+       }
+       as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_axis_state *as = dev_id;
+       gpio_event_update_axis(as, 1);
+       return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+                        struct gpio_event_info *info, void **data, int func)
+{
+       int ret;
+       int i;
+       int irq;
+       struct gpio_event_axis_info *ai;
+       struct gpio_axis_state *as;
+
+       ai = container_of(info, struct gpio_event_axis_info, info);
+       if (func == GPIO_EVENT_FUNC_SUSPEND) {
+               for (i = 0; i < ai->count; i++)
+                       disable_irq(gpio_to_irq(ai->gpio[i]));
+               return 0;
+       }
+       if (func == GPIO_EVENT_FUNC_RESUME) {
+               for (i = 0; i < ai->count; i++)
+                       enable_irq(gpio_to_irq(ai->gpio[i]));
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               *data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+               if (as == NULL) {
+                       ret = -ENOMEM;
+                       goto err_alloc_axis_state_failed;
+               }
+               as->input_devs = input_devs;
+               as->info = ai;
+               if (ai->dev >= input_devs->count) {
+                       pr_err("gpio_event_axis: bad device index %d >= %d "
+                               "for %d:%d\n", ai->dev, input_devs->count,
+                               ai->type, ai->code);
+                       ret = -EINVAL;
+                       goto err_bad_device_index;
+               }
+
+               input_set_capability(input_devs->dev[ai->dev],
+                                    ai->type, ai->code);
+               if (ai->type == EV_ABS) {
+                       input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+                                            0, ai->decoded_size - 1, 0, 0);
+               }
+               for (i = 0; i < ai->count; i++) {
+                       ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+                       if (ret < 0)
+                               goto err_request_gpio_failed;
+                       ret = gpio_direction_input(ai->gpio[i]);
+                       if (ret < 0)
+                               goto err_gpio_direction_input_failed;
+                       ret = irq = gpio_to_irq(ai->gpio[i]);
+                       if (ret < 0)
+                               goto err_get_irq_num_failed;
+                       ret = request_irq(irq, gpio_axis_irq_handler,
+                                         IRQF_TRIGGER_RISING |
+                                         IRQF_TRIGGER_FALLING,
+                                         "gpio_event_axis", as);
+                       if (ret < 0)
+                               goto err_request_irq_failed;
+               }
+               gpio_event_update_axis(as, 0);
+               return 0;
+       }
+
+       ret = 0;
+       as = *data;
+       for (i = ai->count - 1; i >= 0; i--) {
+               free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+               gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+               ;
+       }
+err_bad_device_index:
+       kfree(as);
+       *data = NULL;
+err_alloc_axis_state_failed:
+       return ret;
+}
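
gpio_event_axis_func samples a set of GPIOs, runs the raw state through the map callback, and reports position changes, so a 4-GPIO gray-coded wheel only needs a gpio_event_axis_info entry. A hedged sketch under the field names used in this file; the GPIO numbers are placeholders, and the struct would then be referenced from a gpio_event_platform_data info array (see gpio_event.c below):

    #include <linux/gpio_event.h>
    #include <linux/input.h>
    #include <linux/kernel.h>

    static uint32_t wheel_gpios[] = { 40, 41, 42, 43 };     /* placeholders */

    static struct gpio_event_axis_info wheel_axis = {
            .info.func = gpio_event_axis_func,
            .count = ARRAY_SIZE(wheel_gpios),
            .dev = 0,               /* index into input_devs */
            .type = EV_REL,
            .code = REL_WHEEL,
            .decoded_size = 16,     /* one position per 4-bit gray state */
            .map = gpio_axis_4bit_gray_map,
            .gpio = wheel_gpios,
    };
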
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644 (file)
index 0000000..90f07eb
--- /dev/null
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+       struct gpio_event_input_devs *input_devs;
+       const struct gpio_event_platform_data *info;
+       void *state[0];
+};
+
+static int gpio_input_event(
+       struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+       int i;
+       int devnr;
+       int ret = 0;
+       int tmp_ret;
+       struct gpio_event_info **ii;
+       struct gpio_event *ip = input_get_drvdata(dev);
+
+       for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+               if (ip->input_devs->dev[devnr] == dev)
+                       break;
+       if (devnr == ip->input_devs->count) {
+               pr_err("gpio_input_event: unknown device %p\n", dev);
+               return -EIO;
+       }
+
+       for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+               if ((*ii)->event) {
+                       tmp_ret = (*ii)->event(ip->input_devs, *ii,
+                                               &ip->state[i],
+                                               devnr, type, code, value);
+                       if (tmp_ret)
+                               ret = tmp_ret;
+               }
+       }
+       return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+       int i;
+       int ret;
+       struct gpio_event_info **ii;
+
+       if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+               ii = ip->info->info;
+               for (i = 0; i < ip->info->info_count; i++, ii++) {
+                       if ((*ii)->func == NULL) {
+                               ret = -ENODEV;
+                               pr_err("gpio_event_probe: Incomplete pdata, "
+                                       "no function\n");
+                               goto err_no_func;
+                       }
+                       if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+                               continue;
+                       ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+                                         func);
+                       if (ret) {
+                               pr_err("gpio_event_probe: function failed\n");
+                               goto err_func_failed;
+                       }
+               }
+               return 0;
+       }
+
+       ret = 0;
+       i = ip->info->info_count;
+       ii = ip->info->info + i;
+       while (i > 0) {
+               i--;
+               ii--;
+               if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+                       continue;
+               (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+               ;
+       }
+       return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+       if (ip->info->power)
+               ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+       if (ip->info->power)
+               ip->info->power(ip->info, 1);
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+       int err;
+       struct gpio_event *ip;
+       struct gpio_event_platform_data *event_info;
+       int dev_count = 1;
+       int i;
+       int registered = 0;
+
+       event_info = pdev->dev.platform_data;
+       if (event_info == NULL) {
+               pr_err("gpio_event_probe: No pdata\n");
+               return -ENODEV;
+       }
+       if ((!event_info->name && !event_info->names[0]) ||
+           !event_info->info || !event_info->info_count) {
+               pr_err("gpio_event_probe: Incomplete pdata\n");
+               return -ENODEV;
+       }
+       if (!event_info->name)
+               while (event_info->names[dev_count])
+                       dev_count++;
+       ip = kzalloc(sizeof(*ip) +
+                    sizeof(ip->state[0]) * event_info->info_count +
+                    sizeof(*ip->input_devs) +
+                    sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+       if (ip == NULL) {
+               err = -ENOMEM;
+               pr_err("gpio_event_probe: Failed to allocate private data\n");
+               goto err_kp_alloc_failed;
+       }
+       ip->input_devs = (void *)&ip->state[event_info->info_count];
+       platform_set_drvdata(pdev, ip);
+
+       for (i = 0; i < dev_count; i++) {
+               struct input_dev *input_dev = input_allocate_device();
+               if (input_dev == NULL) {
+                       err = -ENOMEM;
+                       pr_err("gpio_event_probe: "
+                               "Failed to allocate input device\n");
+                       goto err_input_dev_alloc_failed;
+               }
+               input_set_drvdata(input_dev, ip);
+               input_dev->name = event_info->name ?
+                                       event_info->name : event_info->names[i];
+               input_dev->event = gpio_input_event;
+               ip->input_devs->dev[i] = input_dev;
+       }
+       ip->input_devs->count = dev_count;
+       ip->info = event_info;
+       if (event_info->power)
+               ip->info->power(ip->info, 1);
+
+       err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+       if (err)
+               goto err_call_all_func_failed;
+
+       for (i = 0; i < dev_count; i++) {
+               err = input_register_device(ip->input_devs->dev[i]);
+               if (err) {
+                       pr_err("gpio_event_probe: Unable to register %s "
+                               "input device\n", ip->input_devs->dev[i]->name);
+                       goto err_input_register_device_failed;
+               }
+               registered++;
+       }
+
+       return 0;
+
+err_input_register_device_failed:
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+       if (event_info->power)
+               ip->info->power(ip->info, 0);
+       for (i = 0; i < registered; i++)
+               input_unregister_device(ip->input_devs->dev[i]);
+       for (i = dev_count - 1; i >= registered; i--) {
+               input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+               ;
+       }
+       kfree(ip);
+err_kp_alloc_failed:
+       return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+       struct gpio_event *ip = platform_get_drvdata(pdev);
+       int i;
+
+       gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+       if (ip->info->power)
+               ip->info->power(ip->info, 0);
+       for (i = 0; i < ip->input_devs->count; i++)
+               input_unregister_device(ip->input_devs->dev[i]);
+       kfree(ip);
+       return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+       .probe          = gpio_event_probe,
+       .remove         = gpio_event_remove,
+       .driver         = {
+               .name   = GPIO_EVENT_DEV_NAME,
+       },
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644 (file)
index 0000000..eefd027
--- /dev/null
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+       DEBOUNCE_UNSTABLE     = BIT(0), /* Got irq, while debouncing */
+       DEBOUNCE_PRESSED      = BIT(1),
+       DEBOUNCE_NOTPRESSED   = BIT(2),
+       DEBOUNCE_WAIT_IRQ     = BIT(3), /* Stable irq state */
+       DEBOUNCE_POLL         = BIT(4), /* Stable polling state */
+
+       DEBOUNCE_UNKNOWN =
+               DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+       struct gpio_input_state *ds;
+       uint8_t debounce;
+};
+
+struct gpio_input_state {
+       struct gpio_event_input_devs *input_devs;
+       const struct gpio_event_input_info *info;
+       struct hrtimer timer;
+       int use_irq;
+       int debounce_count;
+       spinlock_t irq_lock;
+       struct wakeup_source *ws;
+       struct gpio_key_state key_state[0];
+};
+
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+       int i;
+       int pressed;
+       struct gpio_input_state *ds =
+               container_of(timer, struct gpio_input_state, timer);
+       unsigned gpio_flags = ds->info->flags;
+       unsigned npolarity;
+       int nkeys = ds->info->keymap_size;
+       const struct gpio_event_direct_entry *key_entry;
+       struct gpio_key_state *key_state;
+       unsigned long irqflags;
+       uint8_t debounce;
+       bool sync_needed;
+
+#if 0
+       key_entry = kp->keys_info->keymap;
+       key_state = kp->key_state;
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+               pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+                       gpio_read_detect_status(key_entry->gpio));
+#endif
+       key_entry = ds->info->keymap;
+       key_state = ds->key_state;
+       sync_needed = false;
+       spin_lock_irqsave(&ds->irq_lock, irqflags);
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+               debounce = key_state->debounce;
+               if (debounce & DEBOUNCE_WAIT_IRQ)
+                       continue;
+               if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+                       debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+                       enable_irq(gpio_to_irq(key_entry->gpio));
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) continue debounce\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+               }
+               npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+               pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+               if (debounce & DEBOUNCE_POLL) {
+                       if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+                               ds->debounce_count++;
+                               key_state->debounce = DEBOUNCE_UNKNOWN;
+                               if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                                       pr_info("gpio_keys_scan_keys: key %x-"
+                                               "%x, %d (%d) start debounce\n",
+                                               ds->info->type, key_entry->code,
+                                               i, key_entry->gpio);
+                       }
+                       continue;
+               }
+               if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) debounce pressed 1\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+                       key_state->debounce = DEBOUNCE_PRESSED;
+                       continue;
+               }
+               if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+                       if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+                                       "(%d) debounce pressed 0\n",
+                                       ds->info->type, key_entry->code,
+                                       i, key_entry->gpio);
+                       key_state->debounce = DEBOUNCE_NOTPRESSED;
+                       continue;
+               }
+               /* key is stable */
+               ds->debounce_count--;
+               if (ds->use_irq)
+                       key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+               else
+                       key_state->debounce |= DEBOUNCE_POLL;
+               if (gpio_flags & GPIOEDF_PRINT_KEYS)
+                       pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+                               "changed to %d\n", ds->info->type,
+                               key_entry->code, i, key_entry->gpio, pressed);
+               input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+                           key_entry->code, pressed);
+               sync_needed = true;
+       }
+       if (sync_needed) {
+               for (i = 0; i < ds->input_devs->count; i++)
+                       input_sync(ds->input_devs->dev[i]);
+       }
+
+#if 0
+       key_entry = kp->keys_info->keymap;
+       key_state = kp->key_state;
+       for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+               pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+                       gpio_read_detect_status(key_entry->gpio));
+       }
+#endif
+
+       if (ds->debounce_count)
+               hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+       else if (!ds->use_irq)
+               hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+       else
+               __pm_relax(ds->ws);
+
+       spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+       return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_key_state *ks = dev_id;
+       struct gpio_input_state *ds = ks->ds;
+       int keymap_index = ks - ds->key_state;
+       const struct gpio_event_direct_entry *key_entry;
+       unsigned long irqflags;
+       int pressed;
+
+       if (!ds->use_irq)
+               return IRQ_HANDLED;
+
+       key_entry = &ds->info->keymap[keymap_index];
+
+       if (ds->info->debounce_time.tv64) {
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+                       ks->debounce = DEBOUNCE_UNKNOWN;
+                       if (ds->debounce_count++ == 0) {
+                               __pm_stay_awake(ds->ws);
+                               hrtimer_start(
+                                       &ds->timer, ds->info->debounce_time,
+                                       HRTIMER_MODE_REL);
+                       }
+                       if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+                               pr_info("gpio_event_input_irq_handler: "
+                                       "key %x-%x, %d (%d) start debounce\n",
+                                       ds->info->type, key_entry->code,
+                                       keymap_index, key_entry->gpio);
+               } else {
+                       disable_irq_nosync(irq);
+                       ks->debounce = DEBOUNCE_UNSTABLE;
+               }
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+       } else {
+               pressed = gpio_get_value(key_entry->gpio) ^
+                       !(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+               if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+                       pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+                               "(%d) changed to %d\n",
+                               ds->info->type, key_entry->code, keymap_index,
+                               key_entry->gpio, pressed);
+               input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+                           key_entry->code, pressed);
+               input_sync(ds->input_devs->dev[key_entry->dev]);
+       }
+       return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+       int i;
+       int err;
+       unsigned int irq;
+       unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+       for (i = 0; i < ds->info->keymap_size; i++) {
+               err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+               if (err < 0)
+                       goto err_gpio_get_irq_num_failed;
+               err = request_irq(irq, gpio_event_input_irq_handler,
+                                 req_flags, "gpio_keys", &ds->key_state[i]);
+               if (err) {
+                       pr_err("gpio_event_input_request_irqs: request_irq "
+                               "failed for input %d, irq %d\n",
+                               ds->info->keymap[i].gpio, irq);
+                       goto err_request_irq_failed;
+               }
+               if (ds->info->info.no_suspend) {
+                       err = enable_irq_wake(irq);
+                       if (err) {
+                               pr_err("gpio_event_input_request_irqs: "
+                                       "enable_irq_wake failed for input %d, "
+                                       "irq %d\n",
+                                       ds->info->keymap[i].gpio, irq);
+                               goto err_enable_irq_wake_failed;
+                       }
+               }
+       }
+       return 0;
+
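+       /* Error unwind: reached only via the goto labels inside this loop. */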
+       for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+               irq = gpio_to_irq(ds->info->keymap[i].gpio);
+               if (ds->info->info.no_suspend)
+                       disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+               free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+               ;
+       }
+       return err;
+}
+
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func)
+{
+       int ret;
+       int i;
+       unsigned long irqflags;
+       struct gpio_event_input_info *di;
+       struct gpio_input_state *ds = *data;
+       char *wlname;
+
+       di = container_of(info, struct gpio_event_input_info, info);
+
+       if (func == GPIO_EVENT_FUNC_SUSPEND) {
+               if (ds->use_irq)
+                       for (i = 0; i < di->keymap_size; i++)
+                               disable_irq(gpio_to_irq(di->keymap[i].gpio));
+               hrtimer_cancel(&ds->timer);
+               return 0;
+       }
+       if (func == GPIO_EVENT_FUNC_RESUME) {
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               if (ds->use_irq)
+                       for (i = 0; i < di->keymap_size; i++)
+                               enable_irq(gpio_to_irq(di->keymap[i].gpio));
+               hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               if (ktime_to_ns(di->poll_time) <= 0)
+                       di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+               *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+                                       di->keymap_size, GFP_KERNEL);
+               if (ds == NULL) {
+                       ret = -ENOMEM;
+                       pr_err("gpio_event_input_func: "
+                               "Failed to allocate private data\n");
+                       goto err_ds_alloc_failed;
+               }
+               ds->debounce_count = di->keymap_size;
+               ds->input_devs = input_devs;
+               ds->info = di;
+               wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+                                  input_devs->dev[0]->name,
+                                  (input_devs->count > 1) ? "..." : "");
+
+               ds->ws = wakeup_source_register(wlname);
+               kfree(wlname);
+               if (!ds->ws) {
+                       ret = -ENOMEM;
+                       pr_err("gpio_event_input_func: "
+                               "Failed to allocate wakeup source\n");
+                       goto err_ws_failed;
+               }
+
+               spin_lock_init(&ds->irq_lock);
+
+               for (i = 0; i < di->keymap_size; i++) {
+                       int dev = di->keymap[i].dev;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpio_event_input_func: bad device "
+                                       "index %d >= %d for key code %d\n",
+                                       dev, input_devs->count,
+                                       di->keymap[i].code);
+                               ret = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       input_set_capability(input_devs->dev[dev], di->type,
+                                            di->keymap[i].code);
+                       ds->key_state[i].ds = ds;
+                       ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+               }
+
+               for (i = 0; i < di->keymap_size; i++) {
+                       ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+                       if (ret) {
+                               pr_err("gpio_event_input_func: gpio_request "
+                                       "failed for %d\n", di->keymap[i].gpio);
+                               goto err_gpio_request_failed;
+                       }
+                       ret = gpio_direction_input(di->keymap[i].gpio);
+                       if (ret) {
+                               pr_err("gpio_event_input_func: "
+                                       "gpio_direction_input failed for %d\n",
+                                       di->keymap[i].gpio);
+                               goto err_gpio_configure_failed;
+                       }
+               }
+
+               ret = gpio_event_input_request_irqs(ds);
+
+               spin_lock_irqsave(&ds->irq_lock, irqflags);
+               ds->use_irq = ret == 0;
+
+               pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+                       "mode\n", input_devs->dev[0]->name,
+                       (input_devs->count > 1) ? "..." : "",
+                       ret == 0 ? "interrupt" : "polling");
+
+               hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               ds->timer.function = gpio_event_input_timer_func;
+               hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+               spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+               return 0;
+       }
+
+       ret = 0;
+       spin_lock_irqsave(&ds->irq_lock, irqflags);
+       hrtimer_cancel(&ds->timer);
+       if (ds->use_irq) {
+               for (i = di->keymap_size - 1; i >= 0; i--) {
+                       int irq = gpio_to_irq(di->keymap[i].gpio);
+                       if (ds->info->info.no_suspend)
+                               disable_irq_wake(irq);
+                       free_irq(irq, &ds->key_state[i]);
+               }
+       }
+       spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+       for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+               gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+               ;
+       }
+err_bad_keymap:
+       wakeup_source_unregister(ds->ws);
+err_ws_failed:
+       kfree(ds);
+err_ds_alloc_failed:
+       return ret;
+}
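
For reference, this driver is configured entirely through gpio_event platform data. A minimal board-file sketch (an illustration only, not part of the patch: the GPIO numbers and keycodes are placeholders, and the initializers follow the gpio_event_input_info field usage visible in the function above):

	static struct gpio_event_direct_entry example_keymap[] = {
		{ .gpio = 17, .code = KEY_VOLUMEUP },
		{ .gpio = 18, .code = KEY_VOLUMEDOWN },
	};

	static struct gpio_event_input_info example_input_info = {
		.info.func = gpio_event_input_func,
		.info.no_suspend = true,
		.flags = GPIOEDF_PRINT_KEYS,
		.type = EV_KEY,
		.debounce_time.tv64 = 5 * NSEC_PER_MSEC,
		.keymap = example_keymap,
		.keymap_size = ARRAY_SIZE(example_keymap),
	};

Each keymap entry's .dev defaults to 0, i.e. the first registered input device.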
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644 (file)
index 0000000..eaa9e89
--- /dev/null
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+       struct gpio_event_input_devs *input_devs;
+       struct gpio_event_matrix_info *keypad_info;
+       struct hrtimer timer;
+       struct wake_lock wake_lock;
+       int current_output;
+       unsigned int use_irq:1;
+       unsigned int key_state_changed:1;
+       unsigned int last_key_state_changed:1;
+       unsigned int some_keys_pressed:2;
+       unsigned int disabled_irq:1;
+       unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       int key_index = out * mi->ninputs + in;
+       unsigned short keyentry = mi->keymap[key_index];
+       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+       if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+               if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+                       pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+                               "cleared\n", keycode, out, in,
+                               mi->output_gpios[out], mi->input_gpios[in]);
+               __clear_bit(key_index, kp->keys_pressed);
+       } else {
+               if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+                       pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+                               "not cleared\n", keycode, out, in,
+                               mi->output_gpios[out], mi->input_gpios[in]);
+       }
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+       int rv = 0;
+       int key_index;
+
+       key_index = out * kp->keypad_info->ninputs + in;
+       while (out < kp->keypad_info->noutputs) {
+               if (test_bit(key_index, kp->keys_pressed)) {
+                       rv = 1;
+                       clear_phantom_key(kp, out, in);
+               }
+               key_index += kp->keypad_info->ninputs;
+               out++;
+       }
+       return rv;
+}
+
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+       int out, in, inp;
+       int key_index;
+
+       if (kp->some_keys_pressed < 3)
+               return;
+
+       for (out = 0; out < kp->keypad_info->noutputs; out++) {
+               inp = -1;
+               key_index = out * kp->keypad_info->ninputs;
+               for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+                       if (test_bit(key_index, kp->keys_pressed)) {
+                               if (inp == -1) {
+                                       inp = in;
+                                       continue;
+                               }
+                               if (inp >= 0) {
+                                       if (!restore_keys_for_input(kp, out + 1,
+                                                                       inp))
+                                               break;
+                                       clear_phantom_key(kp, out, inp);
+                                       inp = -2;
+                               }
+                               restore_keys_for_input(kp, out, in);
+                       }
+               }
+       }
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       int pressed = test_bit(key_index, kp->keys_pressed);
+       unsigned short keyentry = mi->keymap[key_index];
+       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+       if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+               if (keycode == KEY_RESERVED) {
+                       if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+                               pr_info("gpiomatrix: unmapped key, %d-%d "
+                                       "(%d-%d) changed to %d\n",
+                                       out, in, mi->output_gpios[out],
+                                       mi->input_gpios[in], pressed);
+               } else {
+                       if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+                               pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+                                       "changed to %d\n", keycode,
+                                       out, in, mi->output_gpios[out],
+                                       mi->input_gpios[in], pressed);
+                       input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+               }
+       }
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+       int i;
+
+       for (i = 0; i < kp->input_devs->count; i++)
+               input_sync(kp->input_devs->dev[i]);
+}
+
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+       int out, in;
+       int key_index;
+       int gpio;
+       struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       unsigned gpio_keypad_flags = mi->flags;
+       unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+       out = kp->current_output;
+       if (out == mi->noutputs) {
+               out = 0;
+               kp->last_key_state_changed = kp->key_state_changed;
+               kp->key_state_changed = 0;
+               kp->some_keys_pressed = 0;
+       } else {
+               key_index = out * mi->ninputs;
+               for (in = 0; in < mi->ninputs; in++, key_index++) {
+                       gpio = mi->input_gpios[in];
+                       if (gpio_get_value(gpio) ^ !polarity) {
+                               if (kp->some_keys_pressed < 3)
+                                       kp->some_keys_pressed++;
+                               kp->key_state_changed |= !__test_and_set_bit(
+                                               key_index, kp->keys_pressed);
+                       } else
+                               kp->key_state_changed |= __test_and_clear_bit(
+                                               key_index, kp->keys_pressed);
+               }
+               gpio = mi->output_gpios[out];
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(gpio, !polarity);
+               else
+                       gpio_direction_input(gpio);
+               out++;
+       }
+       kp->current_output = out;
+       if (out < mi->noutputs) {
+               gpio = mi->output_gpios[out];
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(gpio, polarity);
+               else
+                       gpio_direction_output(gpio, polarity);
+               hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+               return HRTIMER_NORESTART;
+       }
+       if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+               if (kp->key_state_changed) {
+                       hrtimer_start(&kp->timer, mi->debounce_delay,
+                                     HRTIMER_MODE_REL);
+                       return HRTIMER_NORESTART;
+               }
+               kp->key_state_changed = kp->last_key_state_changed;
+       }
+       if (kp->key_state_changed) {
+               if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+                       remove_phantom_keys(kp);
+               key_index = 0;
+               for (out = 0; out < mi->noutputs; out++)
+                       for (in = 0; in < mi->ninputs; in++, key_index++)
+                               report_key(kp, key_index, out, in);
+               report_sync(kp);
+       }
+       if (!kp->use_irq || kp->some_keys_pressed) {
+               hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+               return HRTIMER_NORESTART;
+       }
+
+       /* No keys are pressed, reenable interrupt */
+       for (out = 0; out < mi->noutputs; out++) {
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(mi->output_gpios[out], polarity);
+               else
+                       gpio_direction_output(mi->output_gpios[out], polarity);
+       }
+       for (in = 0; in < mi->ninputs; in++)
+               enable_irq(gpio_to_irq(mi->input_gpios[in]));
+       wake_unlock(&kp->wake_lock);
+       return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+       int i;
+       struct gpio_kp *kp = dev_id;
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+       unsigned gpio_keypad_flags = mi->flags;
+
+       if (!kp->use_irq) {
+               /* ignore interrupt while registering the handler */
+               kp->disabled_irq = 1;
+               disable_irq_nosync(irq_in);
+               return IRQ_HANDLED;
+       }
+
+       for (i = 0; i < mi->ninputs; i++)
+               disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+       for (i = 0; i < mi->noutputs; i++) {
+               if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+                       gpio_set_value(mi->output_gpios[i],
+                               !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+               else
+                       gpio_direction_input(mi->output_gpios[i]);
+       }
+       wake_lock(&kp->wake_lock);
+       hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+       return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+       int i;
+       int err;
+       unsigned int irq;
+       unsigned long request_flags;
+       struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+       switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+       default:
+               request_flags = IRQF_TRIGGER_FALLING;
+               break;
+       case GPIOKPF_ACTIVE_HIGH:
+               request_flags = IRQF_TRIGGER_RISING;
+               break;
+       case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+               request_flags = IRQF_TRIGGER_LOW;
+               break;
+       case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+               request_flags = IRQF_TRIGGER_HIGH;
+               break;
+       }
+
+       for (i = 0; i < mi->ninputs; i++) {
+               err = irq = gpio_to_irq(mi->input_gpios[i]);
+               if (err < 0)
+                       goto err_gpio_get_irq_num_failed;
+               err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+                                 "gpio_kp", kp);
+               if (err) {
+                       pr_err("gpiomatrix: request_irq failed for input %d, "
+                               "irq %d\n", mi->input_gpios[i], irq);
+                       goto err_request_irq_failed;
+               }
+               err = enable_irq_wake(irq);
+               if (err) {
+                       pr_err("gpiomatrix: enable_irq_wake failed for input "
+                               "%d, irq %d\n", mi->input_gpios[i], irq);
+               }
+               disable_irq(irq);
+               if (kp->disabled_irq) {
+                       kp->disabled_irq = 0;
+                       enable_irq(irq);
+               }
+       }
+       return 0;
+
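+       /* Unwind path: entered only through the error gotos inside the loop. */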
+       for (i = mi->ninputs - 1; i >= 0; i--) {
+               free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+               ;
+       }
+       return err;
+}
+
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+       struct gpio_event_info *info, void **data, int func)
+{
+       int i;
+       int err;
+       int key_count;
+       struct gpio_kp *kp;
+       struct gpio_event_matrix_info *mi;
+
+       mi = container_of(info, struct gpio_event_matrix_info, info);
+       if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+               /* TODO: disable scanning */
+               return 0;
+       }
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               if (mi->keymap == NULL ||
+                  mi->input_gpios == NULL ||
+                  mi->output_gpios == NULL) {
+                       err = -ENODEV;
+                       pr_err("gpiomatrix: Incomplete pdata\n");
+                       goto err_invalid_platform_data;
+               }
+               key_count = mi->ninputs * mi->noutputs;
+
+               *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+                                    BITS_TO_LONGS(key_count), GFP_KERNEL);
+               if (kp == NULL) {
+                       err = -ENOMEM;
+                       pr_err("gpiomatrix: Failed to allocate private data\n");
+                       goto err_kp_alloc_failed;
+               }
+               kp->input_devs = input_devs;
+               kp->keypad_info = mi;
+               for (i = 0; i < key_count; i++) {
+                       unsigned short keyentry = mi->keymap[i];
+                       unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+                       unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpiomatrix: bad device index %d >= "
+                                       "%d for key code %d\n",
+                                       dev, input_devs->count, keycode);
+                               err = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       if (keycode && keycode <= KEY_MAX)
+                               input_set_capability(input_devs->dev[dev],
+                                                       EV_KEY, keycode);
+               }
+
+               for (i = 0; i < mi->noutputs; i++) {
+                       err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_request failed for "
+                                       "output %d\n", mi->output_gpios[i]);
+                               goto err_request_output_gpio_failed;
+                       }
+                       if (gpio_cansleep(mi->output_gpios[i])) {
+                               pr_err("gpiomatrix: unsupported output gpio %d,"
+                                       " can sleep\n", mi->output_gpios[i]);
+                               err = -EINVAL;
+                               goto err_output_gpio_configure_failed;
+                       }
+                       if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+                               err = gpio_direction_output(mi->output_gpios[i],
+                                       !(mi->flags & GPIOKPF_ACTIVE_HIGH));
+                       else
+                               err = gpio_direction_input(mi->output_gpios[i]);
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_configure failed for "
+                                       "output %d\n", mi->output_gpios[i]);
+                               goto err_output_gpio_configure_failed;
+                       }
+               }
+               for (i = 0; i < mi->ninputs; i++) {
+                       err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_request failed for "
+                                       "input %d\n", mi->input_gpios[i]);
+                               goto err_request_input_gpio_failed;
+                       }
+                       err = gpio_direction_input(mi->input_gpios[i]);
+                       if (err) {
+                               pr_err("gpiomatrix: gpio_direction_input failed"
+                                       " for input %d\n", mi->input_gpios[i]);
+                               goto err_gpio_direction_input_failed;
+                       }
+               }
+               kp->current_output = mi->noutputs;
+               kp->key_state_changed = 1;
+
+               hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               kp->timer.function = gpio_keypad_timer_func;
+               wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+               err = gpio_keypad_request_irqs(kp);
+               kp->use_irq = err == 0;
+
+               pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+                       "%s%s in %s mode\n", input_devs->dev[0]->name,
+                       (input_devs->count > 1) ? "..." : "",
+                       kp->use_irq ? "interrupt" : "polling");
+
+               if (kp->use_irq)
+                       wake_lock(&kp->wake_lock);
+               hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+               return 0;
+       }
+
+       err = 0;
+       kp = *data;
+
+       if (kp->use_irq)
+               for (i = mi->ninputs - 1; i >= 0; i--)
+                       free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+       hrtimer_cancel(&kp->timer);
+       wake_lock_destroy(&kp->wake_lock);
+       for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+               gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+               ;
+       }
+       for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+               gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+               ;
+       }
+err_bad_keymap:
+       kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+       return err;
+}
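
The keymap above is indexed as keymap[out * ninputs + in], with each entry packing a device index and keycode as (dev << MATRIX_CODE_BITS) | code. A minimal board-file sketch for a 2x2 matrix (a hedged illustration: GPIO numbers and keycodes are placeholders, and the MATRIX_KEY() packing helper is assumed from linux/gpio_event.h):

	static unsigned int example_out_gpios[] = { 42, 43 };	/* rows, driven */
	static unsigned int example_in_gpios[] = { 40, 41 };	/* columns, read */

	/* keymap[out * ninputs + in] */
	static unsigned short example_matrix_keymap[] = {
		MATRIX_KEY(0, KEY_HOME), MATRIX_KEY(0, KEY_BACK),
		MATRIX_KEY(0, KEY_MENU), MATRIX_KEY(0, KEY_SEARCH),
	};

	static struct gpio_event_matrix_info example_matrix_info = {
		.info.func = gpio_event_matrix_func,
		.keymap = example_matrix_keymap,
		.output_gpios = example_out_gpios,
		.input_gpios = example_in_gpios,
		.noutputs = ARRAY_SIZE(example_out_gpios),
		.ninputs = ARRAY_SIZE(example_in_gpios),
		.settle_time.tv64 = 40 * NSEC_PER_USEC,
		.debounce_delay.tv64 = 5 * NSEC_PER_MSEC,
		.poll_time.tv64 = 20 * NSEC_PER_MSEC,
		.flags = GPIOKPF_LEVEL_TRIGGERED_IRQ |
			 GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
			 GPIOKPF_PRINT_UNMAPPED_KEYS,
	};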
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644 (file)
index 0000000..2aac2fa
--- /dev/null
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+       struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+       void **data, unsigned int dev, unsigned int type,
+       unsigned int code, int value)
+{
+       int i;
+       struct gpio_event_output_info *oi;
+       oi = container_of(info, struct gpio_event_output_info, info);
+       if (type != oi->type)
+               return 0;
+       if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+               value = !value;
+       for (i = 0; i < oi->keymap_size; i++)
+               if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+                       gpio_set_value(oi->keymap[i].gpio, value);
+       return 0;
+}
+
+int gpio_event_output_func(
+       struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+       void **data, int func)
+{
+       int ret;
+       int i;
+       struct gpio_event_output_info *oi;
+       oi = container_of(info, struct gpio_event_output_info, info);
+
+       if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+               return 0;
+
+       if (func == GPIO_EVENT_FUNC_INIT) {
+               int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+               for (i = 0; i < oi->keymap_size; i++) {
+                       int dev = oi->keymap[i].dev;
+                       if (dev >= input_devs->count) {
+                               pr_err("gpio_event_output_func: bad device "
+                                       "index %d >= %d for key code %d\n",
+                                       dev, input_devs->count,
+                                       oi->keymap[i].code);
+                               ret = -EINVAL;
+                               goto err_bad_keymap;
+                       }
+                       input_set_capability(input_devs->dev[dev], oi->type,
+                                            oi->keymap[i].code);
+               }
+
+               for (i = 0; i < oi->keymap_size; i++) {
+                       ret = gpio_request(oi->keymap[i].gpio,
+                                          "gpio_event_output");
+                       if (ret) {
+                               pr_err("gpio_event_output_func: gpio_request "
+                                       "failed for %d\n", oi->keymap[i].gpio);
+                               goto err_gpio_request_failed;
+                       }
+                       ret = gpio_direction_output(oi->keymap[i].gpio,
+                                                   output_level);
+                       if (ret) {
+                               pr_err("gpio_event_output_func: "
+                                       "gpio_direction_output failed for %d\n",
+                                       oi->keymap[i].gpio);
+                               goto err_gpio_direction_output_failed;
+                       }
+               }
+               return 0;
+       }
+
+       ret = 0;
+       for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+               gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+               ;
+       }
+err_bad_keymap:
+       return ret;
+}
+
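As with the input variant, a board hooks this up through platform data, and the gpio_event core then routes matching events into the callback above. A hedged sketch (the GPIO number and LED code are placeholders, and the .info.event hook is assumed from the gpio_event core, which is not shown in this hunk):

	static struct gpio_event_direct_entry example_led_map[] = {
		{ .gpio = 57, .code = LED_CAPSL },
	};

	static struct gpio_event_output_info example_output_info = {
		.info.func = gpio_event_output_func,
		.info.event = gpio_event_output_event,
		.flags = GPIOEDF_ACTIVE_HIGH,
		.type = EV_LED,
		.keymap = example_led_map,
		.keymap_size = ARRAY_SIZE(example_led_map),
	};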
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644 (file)
index 0000000..a5ea27a
--- /dev/null
@@ -0,0 +1,391 @@
+/*
+ *  drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME          "keychord"
+#define BUFFER_SIZE                    16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
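+/* Advance to the next record in a packed, variable-length keychord list. */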
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+               ((char *)kc + sizeof(struct input_keychord) + \
+               kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+       struct input_handler    input_handler;
+       int                     registered;
+
+       /* list of keychords to monitor */
+       struct input_keychord   *keychords;
+       int                     keychord_count;
+
+       /* bitmask of keys contained in our keychords */
+       unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+       /* current state of the keys */
+       unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+       /* number of keys that are currently pressed */
+       int key_down;
+
+       /* second input_device_id is needed for null termination */
+       struct input_device_id  device_ids[2];
+
+       spinlock_t              lock;
+       wait_queue_head_t       waitq;
+       unsigned char           head;
+       unsigned char           tail;
+       __u16                   buff[BUFFER_SIZE];
+};
+
+static int check_keychord(struct keychord_device *kdev,
+               struct input_keychord *keychord)
+{
+       int i;
+
+       if (keychord->count != kdev->key_down)
+               return 0;
+
+       for (i = 0; i < keychord->count; i++) {
+               if (!test_bit(keychord->keycodes[i], kdev->keystate))
+                       return 0;
+       }
+
+       /* we have a match */
+       return 1;
+}
+
+static void keychord_event(struct input_handle *handle, unsigned int type,
+                          unsigned int code, int value)
+{
+       struct keychord_device *kdev = handle->private;
+       struct input_keychord *keychord;
+       unsigned long flags;
+       int i, got_chord = 0;
+
+       if (type != EV_KEY || code >= KEY_MAX)
+               return;
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* do nothing if key state did not change */
+       if (!test_bit(code, kdev->keystate) == !value)
+               goto done;
+       __change_bit(code, kdev->keystate);
+       if (value)
+               kdev->key_down++;
+       else
+               kdev->key_down--;
+
+       /* don't notify on key up */
+       if (!value)
+               goto done;
+       /* ignore this event if it is not one of the keys we are monitoring */
+       if (!test_bit(code, kdev->keybit))
+               goto done;
+
+       keychord = kdev->keychords;
+       if (!keychord)
+               goto done;
+
+       /* check to see if the keyboard state matches any keychords */
+       for (i = 0; i < kdev->keychord_count; i++) {
+               if (check_keychord(kdev, keychord)) {
+                       kdev->buff[kdev->head] = keychord->id;
+                       kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+                       got_chord = 1;
+                       break;
+               }
+               /* skip to next keychord */
+               keychord = NEXT_KEYCHORD(keychord);
+       }
+
+done:
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       if (got_chord) {
+               pr_info("keychord: got keychord id %d. Any tasks: %d\n",
+                       keychord->id,
+                       !list_empty_careful(&kdev->waitq.task_list));
+               wake_up_interruptible(&kdev->waitq);
+       }
+}
+
+static int keychord_connect(struct input_handler *handler,
+                                         struct input_dev *dev,
+                                         const struct input_device_id *id)
+{
+       int i, ret;
+       struct input_handle *handle;
+       struct keychord_device *kdev =
+               container_of(handler, struct keychord_device, input_handler);
+
+       /*
+        * ignore this input device if it does not contain any keycodes
+        * that we are monitoring
+        */
+       for (i = 0; i < KEY_MAX; i++) {
+               if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+                       break;
+       }
+       if (i == KEY_MAX)
+               return -ENODEV;
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->dev = dev;
+       handle->handler = handler;
+       handle->name = KEYCHORD_NAME;
+       handle->private = kdev;
+
+       ret = input_register_handle(handle);
+       if (ret)
+               goto err_input_register_handle;
+
+       ret = input_open_device(handle);
+       if (ret)
+               goto err_input_open_device;
+
+       pr_info("keychord: using input dev %s for events\n", dev->name);
+
+       return 0;
+
+err_input_open_device:
+       input_unregister_handle(handle);
+err_input_register_handle:
+       kfree(handle);
+       return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+       input_close_device(handle);
+       input_unregister_handle(handle);
+       kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+               size_t count, loff_t *ppos)
+{
+       struct keychord_device *kdev = file->private_data;
+       __u16   id;
+       int retval;
+       unsigned long flags;
+
+       if (count < sizeof(id))
+               return -EINVAL;
+       count = sizeof(id);
+
+       if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+               return -EAGAIN;
+
+       retval = wait_event_interruptible(kdev->waitq,
+                       kdev->head != kdev->tail);
+       if (retval)
+               return retval;
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* pop a keychord ID off the queue */
+       id = kdev->buff[kdev->tail];
+       kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       if (copy_to_user(buffer, &id, count))
+               return -EFAULT;
+
+       return count;
+}
+
+/*
+ * keychord_write is used to configure the driver
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+               size_t count, loff_t *ppos)
+{
+       struct keychord_device *kdev = file->private_data;
+       struct input_keychord *keychords = NULL;
+       struct input_keychord *keychord, *next, *end;
+       int ret, i, key;
+       unsigned long flags;
+
+       if (count < sizeof(struct input_keychord))
+               return -EINVAL;
+       keychords = kzalloc(count, GFP_KERNEL);
+       if (!keychords)
+               return -ENOMEM;
+
+       /* read list of keychords from userspace */
+       if (copy_from_user(keychords, buffer, count)) {
+               kfree(keychords);
+               return -EFAULT;
+       }
+
+       /* unregister handler before changing configuration */
+       if (kdev->registered) {
+               input_unregister_handler(&kdev->input_handler);
+               kdev->registered = 0;
+       }
+
+       spin_lock_irqsave(&kdev->lock, flags);
+       /* clear any existing configuration */
+       kfree(kdev->keychords);
+       kdev->keychords = NULL;
+       kdev->keychord_count = 0;
+       kdev->key_down = 0;
+       memset(kdev->keybit, 0, sizeof(kdev->keybit));
+       memset(kdev->keystate, 0, sizeof(kdev->keystate));
+       kdev->head = kdev->tail = 0;
+
+       keychord = keychords;
+       end = (struct input_keychord *)((char *)keychord + count);
+
+       while (keychord < end) {
+               next = NEXT_KEYCHORD(keychord);
+               if (keychord->count <= 0 || next > end) {
+                       pr_err("keychord: invalid keycode count %d\n",
+                               keychord->count);
+                       goto err_unlock_return;
+               }
+               if (keychord->version != KEYCHORD_VERSION) {
+                       pr_err("keychord: unsupported version %d\n",
+                               keychord->version);
+                       goto err_unlock_return;
+               }
+
+               /* keep track of the keys we are monitoring in keybit */
+               for (i = 0; i < keychord->count; i++) {
+                       key = keychord->keycodes[i];
+                       if (key < 0 || key >= KEY_CNT) {
+                               pr_err("keychord: keycode %d out of range\n",
+                                       key);
+                               goto err_unlock_return;
+                       }
+                       __set_bit(key, kdev->keybit);
+               }
+
+               kdev->keychord_count++;
+               keychord = next;
+       }
+
+       kdev->keychords = keychords;
+       spin_unlock_irqrestore(&kdev->lock, flags);
+
+       ret = input_register_handler(&kdev->input_handler);
+       if (ret) {
+               kfree(keychords);
+               kdev->keychords = NULL;
+               return ret;
+       }
+       kdev->registered = 1;
+
+       return count;
+
+err_unlock_return:
+       spin_unlock_irqrestore(&kdev->lock, flags);
+       kfree(keychords);
+       return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+       struct keychord_device *kdev = file->private_data;
+
+       poll_wait(file, &kdev->waitq, wait);
+
+       if (kdev->head != kdev->tail)
+               return POLLIN | POLLRDNORM;
+
+       return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+       struct keychord_device *kdev;
+
+       kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+       if (!kdev)
+               return -ENOMEM;
+
+       spin_lock_init(&kdev->lock);
+       init_waitqueue_head(&kdev->waitq);
+
+       kdev->input_handler.event = keychord_event;
+       kdev->input_handler.connect = keychord_connect;
+       kdev->input_handler.disconnect = keychord_disconnect;
+       kdev->input_handler.name = KEYCHORD_NAME;
+       kdev->input_handler.id_table = kdev->device_ids;
+
+       kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+       __set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+       file->private_data = kdev;
+
+       return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+       struct keychord_device *kdev = file->private_data;
+
+       if (kdev->registered)
+               input_unregister_handler(&kdev->input_handler);
+       kfree(kdev);
+
+       return 0;
+}
+
+static const struct file_operations keychord_fops = {
+       .owner          = THIS_MODULE,
+       .open           = keychord_open,
+       .release        = keychord_release,
+       .read           = keychord_read,
+       .write          = keychord_write,
+       .poll           = keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+       .fops           = &keychord_fops,
+       .name           = KEYCHORD_NAME,
+       .minor          = MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+       return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+       misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
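
The resulting misc device is driven from userspace by writing a packed list of keychord records and then blocking in read() for chord ids. A hypothetical minimal client (a sketch under assumptions: the device node appears as /dev/keychord per the misc device name above, and KEYCHORD_VERSION is taken to be 1; neither value is stated in this hunk):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		/*
		 * One record: version, id, count, then `count` keycodes --
		 * all __u16, matching struct input_keychord.
		 * KEYCHORD_VERSION is assumed to be 1 here.
		 */
		unsigned short chord[] = { 1, 42, 2, KEY_VOLUMEDOWN, KEY_POWER };
		unsigned short id;
		int fd = open("/dev/keychord", O_RDWR);

		if (fd < 0)
			return 1;
		if (write(fd, chord, sizeof(chord)) != sizeof(chord))
			return 1;
		while (read(fd, &id, sizeof(id)) == sizeof(id))
			printf("keychord %u fired\n", (unsigned)id); /* 42 */
		return 0;
	}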
index c002d8660e3053b056220018f5b25b81eccaade5..3f5743424ff754a6e3d6408df57e71460b457a1c 100644 (file)
@@ -424,6 +424,10 @@ config TI_DAC7512
          This driver can also be built as a module. If so, the module
          will be called ti_dac7512.
 
+config UID_STAT
+       bool "UID based statistics tracking exported to /proc/uid_stat"
+       default n
+
 config VMWARE_BALLOON
        tristate "VMware Balloon Driver"
        depends on X86 && HYPERVISOR_GUEST
index c235d5b683111534a39996b75d8832f063ea0ae9..a57666cec342cab6c1a840d03cb26eef49e8ea64 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
 obj-$(CONFIG_EP93XX_PWM)       += ep93xx_pwm.o
 obj-$(CONFIG_DS1682)           += ds1682.o
 obj-$(CONFIG_TI_DAC7512)       += ti_dac7512.o
+obj-$(CONFIG_UID_STAT)         += uid_stat.o
 obj-$(CONFIG_C2PORT)           += c2port/
 obj-$(CONFIG_HMC6352)          += hmc6352.o
 obj-y                          += eeprom/
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644 (file)
index 0000000..4766c1f
--- /dev/null
@@ -0,0 +1,152 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+       struct list_head link;
+       uid_t uid;
+       atomic_t tcp_rcv;
+       atomic_t tcp_snd;
+};
+
+static struct uid_stat *find_uid_stat(uid_t uid)
+{
+       struct uid_stat *entry;
+
+       list_for_each_entry(entry, &uid_list, link) {
+               if (entry->uid == uid) {
+                       return entry;
+               }
+       }
+       return NULL;
+}
+
+static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
+{
+       unsigned int bytes;
+       atomic_t *counter = m->private;
+
+       bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
+       return seq_printf(m, "%u\n", bytes);
+}
+
+static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_read_atomic_int_fops = {
+       .open           = uid_stat_read_atomic_int_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid)
+{
+       struct uid_stat *new_uid;
+       /* Create the uid stat struct and append it to the list. */
+       new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+       if (!new_uid)
+               return NULL;
+
+       new_uid->uid = uid;
+       /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+       atomic_set(&new_uid->tcp_rcv, INT_MIN);
+       atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+       list_add_tail(&new_uid->link, &uid_list);
+       return new_uid;
+}
+
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+       char uid_s[32];
+       struct proc_dir_entry *entry;
+       sprintf(uid_s, "%u", new_uid->uid);
+       entry = proc_mkdir(uid_s, parent);
+
+       /* Keep reference to uid_stat so we know what uid to read stats from. */
+       proc_create_data("tcp_snd", S_IRUGO, entry,
+                        &uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
+
+       proc_create_data("tcp_rcv", S_IRUGO, entry,
+                        &uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
+}
+
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+       struct uid_stat *entry;
+       unsigned long flags;
+       spin_lock_irqsave(&uid_lock, flags);
+       entry = find_uid_stat(uid);
+       if (entry) {
+               spin_unlock_irqrestore(&uid_lock, flags);
+               return entry;
+       }
+       entry = create_stat(uid);
+       spin_unlock_irqrestore(&uid_lock, flags);
+       if (entry)
+               create_stat_proc(entry);
+       return entry;
+}
+
+int uid_stat_tcp_snd(uid_t uid, int size)
+{
+       struct uid_stat *entry;
+       activity_stats_update();
+       entry = find_or_create_uid_stat(uid);
+       if (!entry)
+               return -1;
+       atomic_add(size, &entry->tcp_snd);
+       return 0;
+}
+
+int uid_stat_tcp_rcv(uid_t uid, int size)
+{
+       struct uid_stat *entry;
+       activity_stats_update();
+       entry = find_or_create_uid_stat(uid);
+       if (!entry)
+               return -1;
+       atomic_add(size, &entry->tcp_rcv);
+       return 0;
+}
+
+static int __init uid_stat_init(void)
+{
+       parent = proc_mkdir("uid_stat", NULL);
+       if (!parent) {
+               pr_err("uid_stat: failed to create proc entry\n");
+               return -1;
+       }
+       return 0;
+}
+
+__initcall(uid_stat_init);
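
The INT_MIN offset used above is worth spelling out: each counter starts at INT_MIN rather than 0, and the show function adds INT_MIN again, which modulo 2^32 cancels the bias, so a plain 32-bit atomic covers the full 0..4 GiB byte range. A standalone sketch of the arithmetic (userspace demo, not kernel code):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		/* counter starts at INT_MIN, as in create_stat() */
		unsigned int raw = (unsigned int)INT_MIN;

		raw += 3000000000u;	/* atomic_add() of ~3 GB of traffic */

		/* recover bytes as uid_stat_atomic_int_show() does */
		printf("%u\n", raw + (unsigned int)INT_MIN); /* 3000000000 */
		return 0;
	}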
index 5562308699bc292d5dce1e63fdea2452c38164f5..79d82124413f1c13e6e9d9df8d40a76e5026c5d0 100644 (file)
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
 
          If unsure, say Y here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+       bool "Defer MMC layer resume until I/O is requested"
+       depends on MMC_BLOCK
+       default n
+       help
+         Say Y here to enable deferred MMC resume until I/O
+         is requested. This will reduce overall resume latency and
+         save power when there's an SD card inserted but not being used.
+
 config SDIO_UART
        tristate "SDIO UART/GPS class support"
        depends on TTY
index 9aca9462a12fa0e056d877040abf0b06c0667ae9..9cf08651d2bf805c587ddfcd08acc2dea0c06507 100644 (file)
@@ -35,6 +35,9 @@
 #include <linux/capability.h>
 #include <linux/compat.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -163,11 +166,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 
 static inline int mmc_get_devidx(struct gendisk *disk)
 {
-       int devmaj = MAJOR(disk_devt(disk));
-       int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
-       if (!devmaj)
-               devidx = disk->first_minor / perdev_minors;
+       int devidx = disk->first_minor / perdev_minors;
        return devidx;
 }
 
@@ -728,18 +727,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
                        req->rq_disk->disk_name, "timed out", name, status);
 
                /* If the status cmd initially failed, retry the r/w cmd */
-               if (!status_valid)
+               if (!status_valid) {
+                       pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
-
+               }
                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
-               if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+               if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+                       pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
+               }
 
                /* Otherwise abort the command */
+               pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
                return ERR_ABORT;
 
        default:
@@ -1019,9 +1022,12 @@ retry:
                        goto out;
        }
 
-       if (mmc_can_sanitize(card))
+       if (mmc_can_sanitize(card)) {
+               trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_SANITIZE_START, 1, 0);
+               trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
+       }
 out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
@@ -1933,6 +1939,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        unsigned long flags;
        unsigned int cmd_flags = req ? req->cmd_flags : 0;
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+       if (mmc_bus_needs_resume(card->host))
+               mmc_resume_bus(card->host);
+#endif
+
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
                mmc_claim_host(card->host);
@@ -2055,6 +2066,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
+       md->disk->flags = GENHD_FL_EXT_DEVT;
        if (area_type & MMC_BLK_DATA_AREA_RPMB)
                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
 
@@ -2369,6 +2381,9 @@ static int mmc_blk_probe(struct mmc_card *card)
        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+       mmc_set_bus_resume_policy(card->host, 1);
+#endif
        if (mmc_add_disk(md))
                goto out;
 
@@ -2394,6 +2409,9 @@ static void mmc_blk_remove(struct mmc_card *card)
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+       mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
 #ifdef CONFIG_PM
index 269d072ef55eabc239a4f1ef55980a89918d2186..ae10a372af0f92050f8d698f6a650a38a604b9b8 100644 (file)
@@ -26,3 +26,18 @@ config MMC_CLKGATE
          support handling this in order for it to be of any use.
 
          If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+       bool "MMC embedded SDIO device support (EXPERIMENTAL)"
+       help
+         If you say Y here, support will be added for embedded SDIO
+         devices which do not contain the necessary enumeration
+         support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+       bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+       help
+         If you say Y here, the MMC layer will be extra paranoid
+         about re-trying SD init requests. This can be a useful
+         work-around for buggy controllers and hardware. Enable
+         if you are experiencing issues with SD detection.
index c40396f23202d607a089f414a6afa224d16d0948..6a83f4ccc1087b2b2de8a929e8e544baba580acc 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/fault-inject.h>
 #include <linux/random.h>
 #include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#include <trace/events/mmc.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -172,6 +175,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                        pr_debug("%s:     %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
+                       trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
                }
 
                if (mrq->stop) {
@@ -536,8 +540,12 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                        mmc_start_bkops(host->card, true);
        }
 
-       if (!err && areq)
+       if (!err && areq) {
+               trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
+                                      areq->mrq->cmd->arg,
+                                      areq->mrq->data);
                start_err = __mmc_start_data_req(host, areq->mrq);
+       }
 
        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);
@@ -1591,6 +1599,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
        spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host)
+{
+       unsigned long flags;
+
+       if (!mmc_bus_needs_resume(host))
+               return -EINVAL;
+
+       pr_info("%s: Starting deferred resume\n", mmc_hostname(host));
+       spin_lock_irqsave(&host->lock, flags);
+       host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+       host->rescan_disable = 0;
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       mmc_bus_get(host);
+       if (host->bus_ops && !host->bus_dead) {
+               mmc_power_up(host);
+               BUG_ON(!host->bus_ops->resume);
+               host->bus_ops->resume(host);
+       }
+
+       if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+               host->bus_ops->detect(host);
+
+       mmc_bus_put(host);
+       pr_info("%s: Deferred resume completed\n", mmc_hostname(host));
+       return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -1656,6 +1694,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
        spin_unlock_irqrestore(&host->lock, flags);
 #endif
        host->detect_change = 1;
+
+       wake_lock(&host->detect_wake_lock);
        mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -1815,8 +1855,13 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
        struct mmc_command cmd = {0};
        unsigned int qty = 0;
        unsigned long timeout;
+       unsigned int fr, nr;
        int err;
 
+       fr = from;
+       nr = to - from + 1;
+       trace_mmc_blk_erase_start(arg, fr, nr);
+
        /*
         * qty is used to calculate the erase timeout which depends on how many
         * erase groups (or allocation units in SD terminology) are affected.
@@ -1920,6 +1965,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
 out:
+
+       trace_mmc_blk_erase_end(arg, fr, nr);
        return err;
 }
 
@@ -2351,6 +2398,7 @@ void mmc_rescan(struct work_struct *work)
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;
+       bool extend_wakelock = false;
 
        if (host->rescan_disable)
                return;
@@ -2372,6 +2420,12 @@ void mmc_rescan(struct work_struct *work)
 
        host->detect_change = 0;
 
+       /* If the card was removed the bus will be marked as dead -
+        * extend the wakelock so userspace has time to respond
+        * to the removal. */
+       if (host->bus_dead)
+               extend_wakelock = true;
+
        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
@@ -2400,16 +2454,24 @@ void mmc_rescan(struct work_struct *work)
 
        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-               if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+               if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+                       extend_wakelock = true;
                        break;
+               }
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);
 
  out:
-       if (host->caps & MMC_CAP_NEEDS_POLL)
+       if (extend_wakelock)
+               wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+       else
+               wake_unlock(&host->detect_wake_lock);
+       if (host->caps & MMC_CAP_NEEDS_POLL) {
+               wake_lock(&host->detect_wake_lock);
                mmc_schedule_delayed_work(&host->detect, HZ);
+       }
 }
 
 void mmc_start_host(struct mmc_host *host)
@@ -2433,7 +2495,8 @@ void mmc_stop_host(struct mmc_host *host)
 #endif
 
        host->rescan_disable = 1;
-       cancel_delayed_work_sync(&host->detect);
+       if (cancel_delayed_work_sync(&host->detect))
+               wake_unlock(&host->detect_wake_lock);
        mmc_flush_scheduled_work();
 
        /* clear pm flags now and let card drivers set them as needed */
@@ -2628,7 +2691,11 @@ int mmc_suspend_host(struct mmc_host *host)
 {
        int err = 0;
 
-       cancel_delayed_work(&host->detect);
+       if (mmc_bus_needs_resume(host))
+               return 0;
+
+       if (cancel_delayed_work(&host->detect))
+               wake_unlock(&host->detect_wake_lock);
        mmc_flush_scheduled_work();
 
        mmc_bus_get(host);
@@ -2679,6 +2746,12 @@ int mmc_resume_host(struct mmc_host *host)
        int err = 0;
 
        mmc_bus_get(host);
+       if (mmc_bus_manual_resume(host)) {
+               host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+               mmc_bus_put(host);
+               return 0;
+       }
+
        if (host->bus_ops && !host->bus_dead) {
                if (!mmc_card_keep_power(host)) {
                        mmc_power_up(host);
@@ -2739,9 +2812,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
                }
 
                spin_lock_irqsave(&host->lock, flags);
+               if (mmc_bus_needs_resume(host)) {
+                       spin_unlock_irqrestore(&host->lock, flags);
+                       break;
+               }
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
-               cancel_delayed_work_sync(&host->detect);
+               if (cancel_delayed_work_sync(&host->detect))
+                       wake_unlock(&host->detect_wake_lock);
 
                if (!host->bus_ops || host->bus_ops->suspend)
                        break;
@@ -2762,6 +2840,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
        case PM_POST_RESTORE:
 
                spin_lock_irqsave(&host->lock, flags);
+               if (mmc_bus_manual_resume(host)) {
+                       spin_unlock_irqrestore(&host->lock, flags);
+                       break;
+               }
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                mmc_detect_change(host, 0);
@@ -2789,6 +2871,22 @@ void mmc_init_context_info(struct mmc_host *host)
        init_waitqueue_head(&host->context_info.wait);
 }
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+                               struct sdio_cis *cis,
+                               struct sdio_cccr *cccr,
+                               struct sdio_embedded_func *funcs,
+                               int num_funcs)
+{
+       host->embedded_sdio_data.cis = cis;
+       host->embedded_sdio_data.cccr = cccr;
+       host->embedded_sdio_data.funcs = funcs;
+       host->embedded_sdio_data.num_funcs = num_funcs;
+}
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
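For reference, a board file would typically hand the core these pre-canned tables before registering the host, so function probing works without touching the card. A minimal sketch, assuming a hypothetical single-function WLAN chip; the IDs and limits are illustrative only:

        /* Hypothetical board setup for an embedded SDIO WLAN device */
        static struct sdio_cis wlan_cis = {
                .vendor  = 0x1234,      /* illustrative vendor/device IDs */
                .device  = 0x5678,
                .max_dtr = 25000000,
        };
        static struct sdio_cccr wlan_cccr = {
                .multi_block = 1,
                .wide_bus    = 1,
        };
        static struct sdio_embedded_func wlan_func = {
                .f_class      = SDIO_CLASS_WLAN,
                .f_maxblksize = 512,
        };

        static void board_setup_wifi(struct mmc_host *host)
        {
                mmc_set_embedded_sdio_data(host, &wlan_cis, &wlan_cccr,
                                           &wlan_func, 1);
        }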
 static int __init mmc_init(void)
 {
        int ret;
index 2a3593d9f87df3bf01bb4229d74c5aef12ed3fdd..56dadd667ec1505bf81862cefea8e5fb6816897c 100644 (file)
@@ -459,6 +459,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 
        spin_lock_init(&host->lock);
        init_waitqueue_head(&host->wq);
+       wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+               kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
        INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 #ifdef CONFIG_PM
        host->pm_notify.notifier_call = mmc_pm_notify;
@@ -511,7 +513,8 @@ int mmc_add_host(struct mmc_host *host)
        mmc_host_clk_sysfs_init(host);
 
        mmc_start_host(host);
-       register_pm_notifier(&host->pm_notify);
+       if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+               register_pm_notifier(&host->pm_notify);
 
        return 0;
 }
@@ -528,7 +531,9 @@ EXPORT_SYMBOL(mmc_add_host);
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-       unregister_pm_notifier(&host->pm_notify);
+       if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+               unregister_pm_notifier(&host->pm_notify);
+
        mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
@@ -555,6 +560,7 @@ void mmc_free_host(struct mmc_host *host)
        spin_lock(&mmc_host_lock);
        idr_remove(&mmc_host_idr, host->index);
        spin_unlock(&mmc_host_lock);
+       wake_lock_destroy(&host->detect_wake_lock);
 
        put_device(&host->class_dev);
 }
index 9e645e19cec6c791151c4d6cb7672aa364641dac..f008318c5c4d6765263f7e328f175329a36eae55 100644 (file)
@@ -805,6 +805,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
        bool reinit)
 {
        int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        if (!reinit) {
                /*
@@ -831,7 +834,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
                /*
                 * Fetch switch information from card.
                 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+               for (retries = 1; retries <= 3; retries++) {
+                       err = mmc_read_switch(card);
+                       if (!err) {
+                               if (retries > 1) {
+                                       printk(KERN_WARNING
+                                              "%s: recovered\n", 
+                                              mmc_hostname(host));
+                               }
+                               break;
+                       } else {
+                               printk(KERN_WARNING
+                                      "%s: read switch failed (attempt %d)\n",
+                                      mmc_hostname(host), retries);
+                       }
+               }
+#else
                err = mmc_read_switch(card);
+#endif
+
                if (err)
                        return err;
        }
@@ -1032,7 +1054,10 @@ static int mmc_sd_alive(struct mmc_host *host)
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-       int err;
+       int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries = 5;
+#endif
 
        BUG_ON(!host);
        BUG_ON(!host->card);
@@ -1042,7 +1067,23 @@ static void mmc_sd_detect(struct mmc_host *host)
        /*
         * Just check if our card has been removed.
         */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       while (retries) {
+               err = mmc_send_status(host->card, NULL);
+               if (err) {
+                       retries--;
+                       udelay(5);
+                       continue;
+               }
+               break;
+       }
+       if (!retries) {
+               printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+                      __func__, mmc_hostname(host), err);
+       }
+#else
        err = _mmc_detect_card_removed(host);
+#endif
 
        mmc_release_host(host);
 
@@ -1084,12 +1125,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
 static int mmc_sd_resume(struct mmc_host *host)
 {
        int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        BUG_ON(!host);
        BUG_ON(!host->card);
 
        mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       retries = 5;
+       while (retries) {
+               err = mmc_sd_init_card(host, host->ocr, host->card);
+
+               if (err) {
+                       printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+                              mmc_hostname(host), err, retries);
+                       mdelay(5);
+                       retries--;
+                       continue;
+               }
+               break;
+       }
+#else
        err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
        mmc_release_host(host);
 
        return err;
@@ -1143,6 +1203,9 @@ int mmc_attach_sd(struct mmc_host *host)
 {
        int err;
        u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       int retries;
+#endif
 
        BUG_ON(!host);
        WARN_ON(!host->claimed);
@@ -1198,9 +1261,27 @@ int mmc_attach_sd(struct mmc_host *host)
        /*
         * Detect and init the card.
         */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+       retries = 5;
+       while (retries) {
+               err = mmc_sd_init_card(host, host->ocr, NULL);
+               if (err) {
+                       retries--;
+                       continue;
+               }
+               break;
+       }
+
+       if (!retries) {
+               printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+                      mmc_hostname(host), err);
+               goto err;
+       }
+#else
        err = mmc_sd_init_card(host, host->ocr, NULL);
        if (err)
                goto err;
+#endif
 
        mmc_release_host(host);
        err = mmc_add_card(host->card);
index 6889a821c1dafcb932325332d8cd3dc4d35cd740..46e68f125ff2719a03cd6547dd97cda4f693aab4 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
        int ret;
@@ -728,19 +733,35 @@ try_again:
                goto finish;
        }
 
-       /*
-        * Read the common registers.
-        */
-       err = sdio_read_cccr(card, ocr);
-       if (err)
-               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.cccr)
+               memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+       else {
+#endif
+               /*
+                * Read the common registers.
+                */
+               err = sdio_read_cccr(card, ocr);
+               if (err)
+                       goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       }
+#endif
 
-       /*
-        * Read the common CIS tuples.
-        */
-       err = sdio_read_common_cis(card);
-       if (err)
-               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.cis)
+               memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+       else {
+#endif
+               /*
+                * Read the common CIS tuples.
+                */
+               err = sdio_read_common_cis(card);
+               if (err)
+                       goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       }
+#endif
 
        if (oldcard) {
                int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1147,14 +1168,36 @@ int mmc_attach_sdio(struct mmc_host *host)
        funcs = (ocr & 0x70000000) >> 28;
        card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       if (host->embedded_sdio_data.funcs)
+               card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
        /*
         * Initialize (but don't add) all present functions.
         */
        for (i = 0; i < funcs; i++, card->sdio_funcs++) {
-               err = sdio_init_func(host->card, i + 1);
-               if (err)
-                       goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+               if (host->embedded_sdio_data.funcs) {
+                       struct sdio_func *tmp;
+
+                       tmp = sdio_alloc_func(host->card);
+                       if (IS_ERR(tmp))
+                               goto remove;
+                       tmp->num = (i + 1);
+                       card->sdio_func[i] = tmp;
+                       tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+                       tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+                       tmp->vendor = card->cis.vendor;
+                       tmp->device = card->cis.device;
+               } else {
+#endif
+                       err = sdio_init_func(host->card, i + 1);
+                       if (err)
+                               goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+               }
+#endif
                /*
                 * Enable Runtime PM for this func (if supported)
                 */
@@ -1202,3 +1245,39 @@ err:
        return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+       struct mmc_host *host = card->host;
+       u32 ocr;
+       int err;
+
+       printk("%s():\n", __func__);
+       mmc_claim_host(host);
+
+       mmc_go_idle(host);
+
+       mmc_set_clock(host, host->f_min);
+
+       err = mmc_send_io_op_cond(host, 0, &ocr);
+       if (err)
+               goto err;
+
+       host->ocr = mmc_select_voltage(host, ocr);
+       if (!host->ocr) {
+               err = -EINVAL;
+               goto err;
+       }
+
+       err = mmc_sdio_init_card(host, host->ocr, card, 0);
+       if (err)
+               goto err;
+
+       mmc_release_host(host);
+       return 0;
+err:
+       printk("%s: Error resetting SDIO communications (%d)\n",
+              mmc_hostname(host), err);
+       mmc_release_host(host);
+       return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
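sdio_reset_comm() exists so a function driver can re-negotiate the SDIO link (idle state, clock, voltage, card init) after power-cycling its chip without a full unbind/rebind. A minimal sketch of a caller, assuming a hypothetical WLAN driver that has just restored power to its device:

        /* Hypothetical: recover SDIO communications after a chip power cycle */
        static int my_wlan_reinit(struct sdio_func *func)
        {
                int ret;

                ret = sdio_reset_comm(func->card);
                if (ret)
                        return ret;

                sdio_claim_host(func);
                ret = sdio_enable_func(func);   /* re-enable our function */
                sdio_release_host(func);
                return ret;
        }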
index 546c67c2bbbf14aacfe6c3a2d3a8d66037705c1d..c012cf59b7d6acb564314137f79ed4e2842cd8a7 100644 (file)
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 /* show configuration fields */
 #define sdio_config_attr(field, format_string)                         \
 static ssize_t                                                         \
@@ -270,7 +274,14 @@ static void sdio_release_func(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
 
-       sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       /*
+        * If this device is embedded then we never allocated
+        * cis tables for this func
+        */
+       if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+               sdio_free_func_cis(func);
 
        kfree(func->info);
 
old mode 100644 (file)
new mode 100755 (executable)
index 78cb4d5..8fdeb07
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
 }
 EXPORT_SYMBOL_GPL(sdio_readb);
 
+/**
+ *     sdio_readb_ext - read a single byte from an SDIO function
+ *     @func: SDIO function to access
+ *     @addr: address to read
+ *     @err_ret: optional status value from transfer
+ *     @in: value placed in the data byte of the CMD52 argument
+ *
+ *     Reads a single byte from the address space of a given SDIO
+ *     function. If there is a problem reading the address, 0xff
+ *     is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+       int *err_ret, unsigned in)
+{
+       int ret;
+       unsigned char val;
+
+       BUG_ON(!func);
+
+       if (err_ret)
+               *err_ret = 0;
+
+       ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+       if (ret) {
+               if (err_ret)
+                       *err_ret = ret;
+               return 0xFF;
+       }
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
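Unlike sdio_readb(), the extra argument lets the caller seed the data byte of the CMD52 argument, which some chips interpret during the read. A minimal usage sketch from inside a function driver, where func is the driver's sdio_func; MY_VENDOR_REG and the seed value are hypothetical:

        int err;
        u8 val;

        val = sdio_readb_ext(func, MY_VENDOR_REG, &err, 0x08);
        if (err)
                dev_err(&func->dev, "extended read failed: %d\n", err);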
+
 /**
  *     sdio_writeb - write a single byte to a SDIO function
  *     @func: SDIO function to access
index 50543f1662150ade806e9967190c60426a9ab029..f2ab08c2c5fbb763773633b208b59d3137a511eb 100644 (file)
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+       tristate "Include chip ids for known NAND devices."
+       depends on MTD
+       help
+         Useful for NAND drivers that do not use the NAND subsystem but
+         would still like to take advantage of the known chip information.
+
 config MTD_NAND_ECC
        tristate
 
@@ -133,9 +140,6 @@ config BCH_CONST_T
        default 8 if MTD_NAND_OMAP_BCH8
 endif
 
-config MTD_NAND_IDS
-       tristate
-
 config MTD_NAND_RICOH
        tristate "Ricoh xD card reader"
        default n
index 1373c6d7278d84dbcfb84a936745e01d8273aa44..282aec4860ebe56b6097a3d6a20da7725c103e2c 100644 (file)
@@ -149,6 +149,23 @@ config PPPOL2TP
          tunnels. L2TP is replacing PPTP for VPN uses.
 if TTY
 
+config PPPOLAC
+       tristate "PPP on L2TP Access Concentrator"
+       depends on PPP && INET
+       help
+         L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+         networks. This driver handles L2TP data packets between a UDP socket
+         and a PPP channel, but only permits one session per socket. Thus it is
+         fairly simple and suited for clients.
+
+config PPPOPNS
+       tristate "PPP on PPTP Network Server"
+       depends on PPP && INET
+       help
+         PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+         networks. This driver handles PPTP data packets between a RAW socket
+         and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
        tristate "PPP support for async serial ports"
        depends on PPP
index a6b6297b00669a3f1e0852f83780550a295e3216..d283d03c4683a9daa8f27f592d3f990ea8af0b79 100644 (file)
@@ -11,3 +11,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644 (file)
index 0000000..a5d3d63
--- /dev/null
@@ -0,0 +1,449 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must remain connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT       0x80
+#define L2TP_LENGTH_BIT                0x40
+#define L2TP_SEQUENCE_BIT      0x08
+#define L2TP_OFFSET_BIT                0x02
+#define L2TP_VERSION           0x02
+#define L2TP_VERSION_MASK      0x0F
+
+#define PPP_ADDR       0xFF
+#define PPP_CTRL       0x03
+
+union unaligned {
+       __u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+       return (union unaligned *)ptr;
+}
+
+struct meta {
+       __u32 sequence;
+       __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+       return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+       struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+       struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+       struct meta *meta = skb_meta(skb);
+       __u32 now = jiffies;
+       __u8 bits;
+       __u8 *ptr;
+
+       /* Drop the packet if L2TP header is missing. */
+       if (skb->len < sizeof(struct udphdr) + 6)
+               goto drop;
+
+       /* Put it back if it is a control packet. */
+       if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+               return opt->backlog_rcv(sk_udp, skb);
+
+       /* Skip UDP header. */
+       skb_pull(skb, sizeof(struct udphdr));
+
+       /* Check the version. */
+       if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+               goto drop;
+       bits = skb->data[0];
+       ptr = &skb->data[2];
+
+       /* Check the length if it is present. */
+       if (bits & L2TP_LENGTH_BIT) {
+               if ((ptr[0] << 8 | ptr[1]) != skb->len)
+                       goto drop;
+               ptr += 2;
+       }
+
+       /* Skip all fields including optional ones. */
+       if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+                       (bits & L2TP_LENGTH_BIT ? 2 : 0) +
+                       (bits & L2TP_OFFSET_BIT ? 2 : 0)))
+               goto drop;
+
+       /* Skip the offset padding if it is present. */
+       if (bits & L2TP_OFFSET_BIT &&
+                       !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+               goto drop;
+
+       /* Check the tunnel and the session. */
+       if (unaligned(ptr)->u32 != opt->local)
+               goto drop;
+
+       /* Check the sequence if it is present. */
+       if (bits & L2TP_SEQUENCE_BIT) {
+               meta->sequence = ptr[4] << 8 | ptr[5];
+               if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+                       goto drop;
+       }
+
+       /* Skip PPP address and control if they are present. */
+       if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+                       skb->data[1] == PPP_CTRL)
+               skb_pull(skb, 2);
+
+       /* Fix PPP protocol if it is compressed. */
+       if (skb->len >= 1 && skb->data[0] & 1)
+               skb_push(skb, 1)[0] = 0;
+
+       /* Drop the packet if PPP protocol is missing. */
+       if (skb->len < 2)
+               goto drop;
+
+       /* Perform reordering if sequencing is enabled. */
+       atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+       if (bits & L2TP_SEQUENCE_BIT) {
+               struct sk_buff *skb1;
+
+               /* Insert the packet into receive queue in order. */
+               skb_set_owner_r(skb, sk);
+               skb_queue_walk(&sk->sk_receive_queue, skb1) {
+                       struct meta *meta1 = skb_meta(skb1);
+                       __s16 order = meta->sequence - meta1->sequence;
+                       if (order == 0)
+                               goto drop;
+                       if (order < 0) {
+                               meta->timestamp = meta1->timestamp;
+                               skb_insert(skb1, skb, &sk->sk_receive_queue);
+                               skb = NULL;
+                               break;
+                       }
+               }
+               if (skb) {
+                       meta->timestamp = now;
+                       skb_queue_tail(&sk->sk_receive_queue, skb);
+               }
+
+               /* Remove packets from receive queue as long as
+                * 1. the receive buffer is full,
+                * 2. they are queued longer than one second, or
+                * 3. there are no missing packets before them. */
+               skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+                       meta = skb_meta(skb);
+                       if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+                                       now - meta->timestamp < HZ &&
+                                       meta->sequence != opt->recv_sequence)
+                               break;
+                       skb_unlink(skb, &sk->sk_receive_queue);
+                       opt->recv_sequence = (__u16)(meta->sequence + 1);
+                       skb_orphan(skb);
+                       ppp_input(&pppox_sk(sk)->chan, skb);
+               }
+               return NET_RX_SUCCESS;
+       }
+
+       /* Flush receive queue if sequencing is disabled. */
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_orphan(skb);
+       ppp_input(&pppox_sk(sk)->chan, skb);
+       return NET_RX_SUCCESS;
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+       sock_hold(sk_udp);
+       sk_receive_skb(sk_udp, skb, 0);
+       return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+       mm_segment_t old_fs = get_fs();
+       struct sk_buff *skb;
+
+       set_fs(KERNEL_DS);
+       while ((skb = skb_dequeue(&delivery_queue))) {
+               struct sock *sk_udp = skb->sk;
+               struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+               struct msghdr msg = {
+                       .msg_iov = (struct iovec *)&iov,
+                       .msg_iovlen = 1,
+                       .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+               };
+               sk_udp->sk_prot->sendmsg(NULL, sk_udp, &msg, skb->len);
+               kfree_skb(skb);
+       }
+       set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+       struct sock *sk_udp = (struct sock *)chan->private;
+       struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+       /* Install PPP address and control. */
+       skb_push(skb, 2);
+       skb->data[0] = PPP_ADDR;
+       skb->data[1] = PPP_CTRL;
+
+       /* Install L2TP header. */
+       if (atomic_read(&opt->sequencing)) {
+               skb_push(skb, 10);
+               skb->data[0] = L2TP_SEQUENCE_BIT;
+               skb->data[6] = opt->xmit_sequence >> 8;
+               skb->data[7] = opt->xmit_sequence;
+               skb->data[8] = 0;
+               skb->data[9] = 0;
+               opt->xmit_sequence++;
+       } else {
+               skb_push(skb, 6);
+               skb->data[0] = 0;
+       }
+       skb->data[1] = L2TP_VERSION;
+       unaligned(&skb->data[2])->u32 = opt->remote;
+
+       /* Now send the packet via the delivery queue. */
+       skb_set_owner_w(skb, sk_udp);
+       skb_queue_tail(&delivery_queue, skb);
+       schedule_work(&delivery_work);
+       return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+       .start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+       int addrlen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct pppox_sock *po = pppox_sk(sk);
+       struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+       struct socket *sock_udp = NULL;
+       struct sock *sk_udp;
+       int error;
+
+       if (addrlen != sizeof(struct sockaddr_pppolac) ||
+                       !addr->local.tunnel || !addr->local.session ||
+                       !addr->remote.tunnel || !addr->remote.session) {
+               return -EINVAL;
+       }
+
+       lock_sock(sk);
+       error = -EALREADY;
+       if (sk->sk_state != PPPOX_NONE)
+               goto out;
+
+       sock_udp = sockfd_lookup(addr->udp_socket, &error);
+       if (!sock_udp)
+               goto out;
+       sk_udp = sock_udp->sk;
+       lock_sock(sk_udp);
+
+       /* Remove this check when IPv6 supports UDP encapsulation. */
+       error = -EAFNOSUPPORT;
+       if (sk_udp->sk_family != AF_INET)
+               goto out;
+       error = -EPROTONOSUPPORT;
+       if (sk_udp->sk_protocol != IPPROTO_UDP)
+               goto out;
+       error = -EDESTADDRREQ;
+       if (sk_udp->sk_state != TCP_ESTABLISHED)
+               goto out;
+       error = -EBUSY;
+       if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+               goto out;
+       if (!sk_udp->sk_bound_dev_if) {
+               struct dst_entry *dst = sk_dst_get(sk_udp);
+               error = -ENODEV;
+               if (!dst)
+                       goto out;
+               sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+               dst_release(dst);
+       }
+
+       po->chan.hdrlen = 12;
+       po->chan.private = sk_udp;
+       po->chan.ops = &pppolac_channel_ops;
+       po->chan.mtu = PPP_MRU - 80;
+       po->proto.lac.local = unaligned(&addr->local)->u32;
+       po->proto.lac.remote = unaligned(&addr->remote)->u32;
+       atomic_set(&po->proto.lac.sequencing, 1);
+       po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+       error = ppp_register_channel(&po->chan);
+       if (error)
+               goto out;
+
+       sk->sk_state = PPPOX_CONNECTED;
+       udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+       udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+       sk_udp->sk_backlog_rcv = pppolac_recv_core;
+       sk_udp->sk_user_data = sk;
+out:
+       if (sock_udp) {
+               release_sock(sk_udp);
+               if (error)
+                       sockfd_put(sock_udp);
+       }
+       release_sock(sk);
+       return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD)) {
+               release_sock(sk);
+               return -EBADF;
+       }
+
+       if (sk->sk_state != PPPOX_NONE) {
+               struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+               lock_sock(sk_udp);
+               skb_queue_purge(&sk->sk_receive_queue);
+               pppox_unbind_sock(sk);
+               udp_sk(sk_udp)->encap_type = 0;
+               udp_sk(sk_udp)->encap_rcv = NULL;
+               sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+               sk_udp->sk_user_data = NULL;
+               release_sock(sk_udp);
+               sockfd_put(sk_udp->sk_socket);
+       }
+
+       sock_orphan(sk);
+       sock->sk = NULL;
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+       .name = "PPPOLAC",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+       .family = PF_PPPOX,
+       .owner = THIS_MODULE,
+       .release = pppolac_release,
+       .bind = sock_no_bind,
+       .connect = pppolac_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = sock_no_accept,
+       .getname = sock_no_getname,
+       .poll = sock_no_poll,
+       .ioctl = pppox_ioctl,
+       .listen = sock_no_listen,
+       .shutdown = sock_no_shutdown,
+       .setsockopt = sock_no_setsockopt,
+       .getsockopt = sock_no_getsockopt,
+       .sendmsg = sock_no_sendmsg,
+       .recvmsg = sock_no_recvmsg,
+       .mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_init_data(sock, sk);
+       sock->state = SS_UNCONNECTED;
+       sock->ops = &pppolac_proto_ops;
+       sk->sk_protocol = PX_PROTO_OLAC;
+       sk->sk_state = PPPOX_NONE;
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+       .create = pppolac_create,
+       .owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+       int error;
+
+       error = proto_register(&pppolac_proto, 0);
+       if (error)
+               return error;
+
+       error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+       if (error)
+               proto_unregister(&pppolac_proto);
+       else
+               skb_queue_head_init(&delivery_queue);
+       return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+       unregister_pppox_proto(PX_PROTO_OLAC);
+       proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644 (file)
index 0000000..6016d29
--- /dev/null
@@ -0,0 +1,428 @@
+/* drivers/net/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address
+ * as the control socket. Outgoing packets are always sent with sequences but
+ * without acknowledgements. Incoming packets with sequences are reordered
+ * within a sliding window of one second. Currently reordering only happens when
+ * a packet is received. It is done for simplicity since no additional locks or
+ * threads are required. This driver should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE                8
+
+#define PPTP_GRE_BITS          htons(0x2001)
+#define PPTP_GRE_BITS_MASK     htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT       htons(0x1000)
+#define PPTP_GRE_ACK_BIT       htons(0x0080)
+#define PPTP_GRE_TYPE          htons(0x880B)
+
+#define PPP_ADDR       0xFF
+#define PPP_CTRL       0x03
+
+struct header {
+       __u16   bits;
+       __u16   type;
+       __u16   length;
+       __u16   call;
+       __u32   sequence;
+} __attribute__((packed));
+
+struct meta {
+       __u32 sequence;
+       __u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+       return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+       struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+       struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+       struct meta *meta = skb_meta(skb);
+       __u32 now = jiffies;
+       struct header *hdr;
+
+       /* Skip transport header */
+       skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+       /* Drop the packet if GRE header is missing. */
+       if (skb->len < GRE_HEADER_SIZE)
+               goto drop;
+       hdr = (struct header *)skb->data;
+
+       /* Check the header. */
+       if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+                       (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+               goto drop;
+
+       /* Skip all fields including optional ones. */
+       if (!skb_pull(skb, GRE_HEADER_SIZE +
+                       (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+                       (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+               goto drop;
+
+       /* Check the length. */
+       if (skb->len != ntohs(hdr->length))
+               goto drop;
+
+       /* Check the sequence if it is present. */
+       if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+               meta->sequence = ntohl(hdr->sequence);
+               if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+                       goto drop;
+       }
+
+       /* Skip PPP address and control if they are present. */
+       if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+                       skb->data[1] == PPP_CTRL)
+               skb_pull(skb, 2);
+
+       /* Fix PPP protocol if it is compressed. */
+       if (skb->len >= 1 && skb->data[0] & 1)
+               skb_push(skb, 1)[0] = 0;
+
+       /* Drop the packet if PPP protocol is missing. */
+       if (skb->len < 2)
+               goto drop;
+
+       /* Perform reordering if sequencing is enabled. */
+       if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+               struct sk_buff *skb1;
+
+               /* Insert the packet into receive queue in order. */
+               skb_set_owner_r(skb, sk);
+               skb_queue_walk(&sk->sk_receive_queue, skb1) {
+                       struct meta *meta1 = skb_meta(skb1);
+                       __s32 order = meta->sequence - meta1->sequence;
+                       if (order == 0)
+                               goto drop;
+                       if (order < 0) {
+                               meta->timestamp = meta1->timestamp;
+                               skb_insert(skb1, skb, &sk->sk_receive_queue);
+                               skb = NULL;
+                               break;
+                       }
+               }
+               if (skb) {
+                       meta->timestamp = now;
+                       skb_queue_tail(&sk->sk_receive_queue, skb);
+               }
+
+               /* Remove packets from receive queue as long as
+                * 1. the receive buffer is full,
+                * 2. they are queued longer than one second, or
+                * 3. there are no missing packets before them. */
+               skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+                       meta = skb_meta(skb);
+                       if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+                                       now - meta->timestamp < HZ &&
+                                       meta->sequence != opt->recv_sequence)
+                               break;
+                       skb_unlink(skb, &sk->sk_receive_queue);
+                       opt->recv_sequence = meta->sequence + 1;
+                       skb_orphan(skb);
+                       ppp_input(&pppox_sk(sk)->chan, skb);
+               }
+               return NET_RX_SUCCESS;
+       }
+
+       /* Flush receive queue if sequencing is disabled. */
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_orphan(skb);
+       ppp_input(&pppox_sk(sk)->chan, skb);
+       return NET_RX_SUCCESS;
+drop:
+       kfree_skb(skb);
+       return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw, int length)
+{
+       struct sk_buff *skb;
+       while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+               sock_hold(sk_raw);
+               sk_receive_skb(sk_raw, skb, 0);
+       }
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+       mm_segment_t old_fs = get_fs();
+       struct sk_buff *skb;
+
+       set_fs(KERNEL_DS);
+       while ((skb = skb_dequeue(&delivery_queue))) {
+               struct sock *sk_raw = skb->sk;
+               struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+               struct msghdr msg = {
+                       .msg_iov = (struct iovec *)&iov,
+                       .msg_iovlen = 1,
+                       .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT,
+               };
+               sk_raw->sk_prot->sendmsg(NULL, sk_raw, &msg, skb->len);
+               kfree_skb(skb);
+       }
+       set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+       struct sock *sk_raw = (struct sock *)chan->private;
+       struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+       struct header *hdr;
+       __u16 length;
+
+       /* Install PPP address and control. */
+       skb_push(skb, 2);
+       skb->data[0] = PPP_ADDR;
+       skb->data[1] = PPP_CTRL;
+       length = skb->len;
+
+       /* Install PPTP GRE header. */
+       hdr = (struct header *)skb_push(skb, 12);
+       hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+       hdr->type = PPTP_GRE_TYPE;
+       hdr->length = htons(length);
+       hdr->call = opt->remote;
+       hdr->sequence = htonl(opt->xmit_sequence);
+       opt->xmit_sequence++;
+
+       /* Now send the packet via the delivery queue. */
+       skb_set_owner_w(skb, sk_raw);
+       skb_queue_tail(&delivery_queue, skb);
+       schedule_work(&delivery_work);
+       return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+       .start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+       int addrlen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct pppox_sock *po = pppox_sk(sk);
+       struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+       struct sockaddr_storage ss;
+       struct socket *sock_tcp = NULL;
+       struct socket *sock_raw = NULL;
+       struct sock *sk_tcp;
+       struct sock *sk_raw;
+       int error;
+
+       if (addrlen != sizeof(struct sockaddr_pppopns))
+               return -EINVAL;
+
+       lock_sock(sk);
+       error = -EALREADY;
+       if (sk->sk_state != PPPOX_NONE)
+               goto out;
+
+       sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+       if (!sock_tcp)
+               goto out;
+       sk_tcp = sock_tcp->sk;
+       error = -EPROTONOSUPPORT;
+       if (sk_tcp->sk_protocol != IPPROTO_TCP)
+               goto out;
+       addrlen = sizeof(struct sockaddr_storage);
+       error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+       if (error)
+               goto out;
+       if (!sk_tcp->sk_bound_dev_if) {
+               struct dst_entry *dst = sk_dst_get(sk_tcp);
+               error = -ENODEV;
+               if (!dst)
+                       goto out;
+               sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+               dst_release(dst);
+       }
+
+       error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+       if (error)
+               goto out;
+       sk_raw = sock_raw->sk;
+       sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+       error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+       if (error)
+               goto out;
+
+       po->chan.hdrlen = 14;
+       po->chan.private = sk_raw;
+       po->chan.ops = &pppopns_channel_ops;
+       po->chan.mtu = PPP_MRU - 80;
+       po->proto.pns.local = addr->local;
+       po->proto.pns.remote = addr->remote;
+       po->proto.pns.data_ready = sk_raw->sk_data_ready;
+       po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+       error = ppp_register_channel(&po->chan);
+       if (error)
+               goto out;
+
+       sk->sk_state = PPPOX_CONNECTED;
+       lock_sock(sk_raw);
+       sk_raw->sk_data_ready = pppopns_recv;
+       sk_raw->sk_backlog_rcv = pppopns_recv_core;
+       sk_raw->sk_user_data = sk;
+       release_sock(sk_raw);
+out:
+       if (sock_tcp)
+               sockfd_put(sock_tcp);
+       if (error && sock_raw)
+               sock_release(sock_raw);
+       release_sock(sk);
+       return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+
+       if (!sk)
+               return 0;
+
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD)) {
+               release_sock(sk);
+               return -EBADF;
+       }
+
+       if (sk->sk_state != PPPOX_NONE) {
+               struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+               lock_sock(sk_raw);
+               skb_queue_purge(&sk->sk_receive_queue);
+               pppox_unbind_sock(sk);
+               sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+               sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+               sk_raw->sk_user_data = NULL;
+               release_sock(sk_raw);
+               sock_release(sk_raw->sk_socket);
+       }
+
+       sock_orphan(sk);
+       sock->sk = NULL;
+       release_sock(sk);
+       sock_put(sk);
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+       .name = "PPPOPNS",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+       .family = PF_PPPOX,
+       .owner = THIS_MODULE,
+       .release = pppopns_release,
+       .bind = sock_no_bind,
+       .connect = pppopns_connect,
+       .socketpair = sock_no_socketpair,
+       .accept = sock_no_accept,
+       .getname = sock_no_getname,
+       .poll = sock_no_poll,
+       .ioctl = pppox_ioctl,
+       .listen = sock_no_listen,
+       .shutdown = sock_no_shutdown,
+       .setsockopt = sock_no_setsockopt,
+       .getsockopt = sock_no_getsockopt,
+       .sendmsg = sock_no_sendmsg,
+       .recvmsg = sock_no_recvmsg,
+       .mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock)
+{
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto);
+       if (!sk)
+               return -ENOMEM;
+
+       sock_init_data(sock, sk);
+       sock->state = SS_UNCONNECTED;
+       sock->ops = &pppopns_proto_ops;
+       sk->sk_protocol = PX_PROTO_OPNS;
+       sk->sk_state = PPPOX_NONE;
+       return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+       .create = pppopns_create,
+       .owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+       int error;
+
+       error = proto_register(&pppopns_proto, 0);
+       if (error)
+               return error;
+
+       error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+       if (error)
+               proto_unregister(&pppopns_proto);
+       else
+               skb_queue_head_init(&delivery_queue);
+       return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+       unregister_pppox_proto(PX_PROTO_OPNS);
+       proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
index 582497103fe88ee4b659f646107437a66e784a2f..aeff706f16346a6327b9918c6f8d8f94032e630e 100644 (file)
@@ -1898,6 +1898,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        int vnet_hdr_sz;
        int ret;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+       if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN))
+               return -EPERM;
+#endif
+
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
index f8f0156dff4e49b6fbe9fd1133055ad829f25f1b..5b0a49cf1dea2ec821054e7f09c340f6f6cac963 100644 (file)
@@ -264,6 +264,11 @@ config MWL8K
          To compile this driver as a module, choose M here: the module
          will be called mwl8k.  If unsure, say N.
 
+config WIFI_CONTROL_FUNC
+       bool "Enable WiFi control function abstraction"
+       help
+         Enables power/reset/card-detect function abstraction for WiFi devices.
+
 source "drivers/net/wireless/ath/Kconfig"
 source "drivers/net/wireless/b43/Kconfig"
 source "drivers/net/wireless/b43legacy/Kconfig"
index 4911158cba8a154b80aaeac44ef39a05741d93df..26d7060c17bfb49cd9069871609ad092df3d2e7b 100644 (file)
@@ -702,36 +702,66 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
        return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
                                     int depth, void *data)
 {
        int l;
        const char *p;
+       char *cmdline = data;
 
        pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-       if (depth != 1 || !data ||
+       if (depth != 1 || !cmdline ||
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;
 
        early_init_dt_check_for_initrd(node);
 
-       /* Retrieve command line */
-       p = of_get_flat_dt_prop(node, "bootargs", &l);
-       if (p != NULL && l > 0)
-               strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
-
-       /*
-        * CONFIG_CMDLINE is meant to be a default in case nothing else
-        * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-        * is set in which case we override whatever was found earlier.
-        */
-#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
-       if (!((char *)data)[0])
-#endif
-               strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
+       /* Use CONFIG_CMDLINE if forced, or if the boot loader supplied nothing */
+       if (overwrite_incoming_cmdline || !cmdline[0])
+               strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
+
+       /* Retrieve command line unless forcing */
+       p = NULL;
+       if (read_dt_cmdline)
+               p = of_get_flat_dt_prop(node, "bootargs", &l);
+
+       if (p != NULL && l > 0) {
+               if (concat_cmdline) {
+                       int cmdline_len;
+                       int copy_len;
+                       strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+                       cmdline_len = strlen(cmdline);
+                       copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+                       copy_len = min((int)l, copy_len);
+                       strncpy(cmdline + cmdline_len, p, copy_len);
+                       cmdline[cmdline_len + copy_len] = '\0';
+               } else {
+                       strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE));
+               }
+       }
 
        pr_debug("Command line is: %s\n", (char*)data);
 
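The net effect of the three constants, sketched with the hypothetical values CONFIG_CMDLINE="console=ttyS0" and DT bootargs "root=/dev/mmcblk0p2", when the boot loader passes no command line of its own:

        CMDLINE_FORCE:            "console=ttyS0"                      (DT bootargs ignored)
        CMDLINE_EXTEND:           "console=ttyS0 root=/dev/mmcblk0p2"  (DT appended)
        CMDLINE_FROM_BOOTLOADER:  "root=/dev/mmcblk0p2"                (DT replaces the default)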
index 1c517c34e4be8edf7145f93eeabc1768c0295f4a..082d3c2714e9cab8b7c54900c06aac6f75e7dd59 100644 (file)
@@ -67,23 +67,40 @@ static int __power_supply_changed_work(struct device *dev, void *data)
 
 static void power_supply_changed_work(struct work_struct *work)
 {
+       unsigned long flags;
        struct power_supply *psy = container_of(work, struct power_supply,
                                                changed_work);
 
        dev_dbg(psy->dev, "%s\n", __func__);
 
-       class_for_each_device(power_supply_class, NULL, psy,
-                             __power_supply_changed_work);
+       spin_lock_irqsave(&psy->changed_lock, flags);
+       if (psy->changed) {
+               psy->changed = false;
+               spin_unlock_irqrestore(&psy->changed_lock, flags);
 
-       power_supply_update_leds(psy);
+               class_for_each_device(power_supply_class, NULL, psy,
+                                     __power_supply_changed_work);
 
-       kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+               power_supply_update_leds(psy);
+
+               kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
+               spin_lock_irqsave(&psy->changed_lock, flags);
+       }
+       if (!psy->changed)
+               pm_relax(psy->dev);
+       spin_unlock_irqrestore(&psy->changed_lock, flags);
 }
 
 void power_supply_changed(struct power_supply *psy)
 {
+       unsigned long flags;
+
        dev_dbg(psy->dev, "%s\n", __func__);
 
+       spin_lock_irqsave(&psy->changed_lock, flags);
+       psy->changed = true;
+       pm_stay_awake(psy->dev);
+       spin_unlock_irqrestore(&psy->changed_lock, flags);
        schedule_work(&psy->changed_work);
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -504,6 +521,11 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
        if (rc)
                goto device_add_failed;
 
+       spin_lock_init(&psy->changed_lock);
+       rc = device_init_wakeup(dev, true);
+       if (rc)
+               goto wakeup_init_failed;
+
        rc = psy_register_thermal(psy);
        if (rc)
                goto register_thermal_failed;
@@ -525,6 +547,7 @@ create_triggers_failed:
 register_cooler_failed:
        psy_unregister_thermal(psy);
 register_thermal_failed:
+wakeup_init_failed:
        device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
index 29178f78d73cfb79868923bfee7ffbe30b393e1b..1f7d79b03725fcb6aad3c8ccfef0549fc7d6e85c 100644 (file)
@@ -105,7 +105,10 @@ static ssize_t power_supply_show_property(struct device *dev,
        else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
                return sprintf(buf, "%s\n", value.strval);
 
-       return sprintf(buf, "%d\n", value.intval);
+       if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
+               return sprintf(buf, "%lld\n", value.int64val);
+       else
+               return sprintf(buf, "%d\n", value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -189,6 +192,12 @@ static struct device_attribute power_supply_attrs[] = {
        POWER_SUPPLY_ATTR(time_to_full_avg),
        POWER_SUPPLY_ATTR(type),
        POWER_SUPPLY_ATTR(scope),
+       /* Local extensions */
+       POWER_SUPPLY_ATTR(usb_hc),
+       POWER_SUPPLY_ATTR(usb_otg),
+       POWER_SUPPLY_ATTR(charge_enabled),
+       /* Local extensions of type int64_t */
+       POWER_SUPPLY_ATTR(charge_counter_ext),
        /* Properties of type `const char *' */
        POWER_SUPPLY_ATTR(model_name),
        POWER_SUPPLY_ATTR(manufacturer),
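A supplier reports the 64-bit extension through the same get_property callback as any other property, filling int64val instead of intval. A minimal sketch, assuming a hypothetical fuel-gauge driver and the int64val union member this patch set introduces:

        /* Hypothetical fuel gauge exposing the 64-bit charge counter */
        static int my_gauge_get_property(struct power_supply *psy,
                                         enum power_supply_property psp,
                                         union power_supply_propval *val)
        {
                switch (psp) {
                case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT:
                        val->int64val = my_gauge_read_charge();  /* hypothetical helper */
                        return 0;
                default:
                        return -EINVAL;
                }
        }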
index c0c95be0f969de825119ce2a6ee535fd6668a63b..f07d9f2a06f955759614bfa29cff41cca9f6142b 100644 (file)
@@ -19,6 +19,14 @@ config ANDROID_BINDER_IPC
          Android process, using Binder to identify, invoke and pass arguments
          between said processes.
 
+config ANDROID_BINDER_IPC_32BIT
+       bool
+       default y
+       depends on !64BIT && ANDROID_BINDER_IPC
+       ---help---
+         Enable to support an old 32-bit Android user-space. Enabling
+         this breaks the new Android user-space.
+
 config ASHMEM
        bool "Enable the Anonymous Shared Memory Subsystem"
        default n
@@ -63,6 +71,15 @@ config ANDROID_LOW_MEMORY_KILLER
        ---help---
          Registers processes to be killed when memory is low
 
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+       bool "Android Low Memory Killer: detect oom_adj values"
+       depends on ANDROID_LOW_MEMORY_KILLER
+       default y
+       ---help---
+         Detect oom_adj values written to
+         /sys/module/lowmemorykiller/parameters/adj and convert them
+         to oom_score_adj values.
+
 config ANDROID_INTF_ALARM_DEV
        bool "Android alarm driver"
        depends on RTC_CLASS
@@ -99,6 +116,10 @@ config SW_SYNC_USER
          *WARNING* improper use of this can result in deadlocking kernel
          drivers from userspace.
 
+source "drivers/staging/android/ion/Kconfig"
+
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
 endif # if ANDROID
 
 endmenu
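
For reference, the conversion that the AUTODETECT_OOM_ADJ_VALUES help
text above describes scales the legacy oom_adj range [-17, 15] onto the
oom_score_adj range [-1000, 1000].  A sketch of the helper, assuming the
usual constants from <linux/oom.h> (OOM_ADJUST_MAX = 15, OOM_DISABLE =
-17, OOM_SCORE_ADJ_MAX = 1000):

#include <linux/oom.h>

static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
{
        if (oom_adj == OOM_ADJUST_MAX)
                return OOM_SCORE_ADJ_MAX;       /* map 15 exactly onto 1000 */

        /* otherwise scale linearly: 1000/17 per legacy adj step */
        return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}
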
index c136299e05afd6f5657061e3577dc46e549e6d6b..907b62f562035affd91f4a27bead87f8cff6571e 100644 (file)
@@ -1,5 +1,8 @@
 ccflags-y += -I$(src)                  # needed for trace events
 
+obj-y                                  += ion/
+obj-$(CONFIG_FIQ_DEBUGGER)             += fiq_debugger/
+
 obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)           += logger.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644 (file)
index b15fb0d..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
-       - checkpatch.pl cleanups
-       - sparse fixes
-       - rename files to be not so "generic"
-       - make sure things build as modules properly
-       - add proper arch dependencies as needed
-       - audit userspace interfaces to make sure they are sane
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
index 4fd32f337f9cbb878419794e966fe8b47851a75a..495b20cf3bf67e18e0ebf49ff6322a0414d08869 100644 (file)
 #ifndef _LINUX_ANDROID_ALARM_H
 #define _LINUX_ANDROID_ALARM_H
 
-#include <linux/ioctl.h>
-#include <linux/time.h>
 #include <linux/compat.h>
+#include <linux/ioctl.h>
 
-enum android_alarm_type {
-       /* return code bit numbers or set alarm arg */
-       ANDROID_ALARM_RTC_WAKEUP,
-       ANDROID_ALARM_RTC,
-       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-       ANDROID_ALARM_ELAPSED_REALTIME,
-       ANDROID_ALARM_SYSTEMTIME,
-
-       ANDROID_ALARM_TYPE_COUNT,
-
-       /* return code bit numbers */
-       /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
-       ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
-       ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
-       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
-                               1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-       ANDROID_ALARM_ELAPSED_REALTIME_MASK =
-                               1U << ANDROID_ALARM_ELAPSED_REALTIME,
-       ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
-       ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT                  _IO('a', 1)
-
-#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
 
 #ifdef CONFIG_COMPAT
 #define ANDROID_ALARM_SET_COMPAT(type)         ALARM_IOW(2, type, \
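
The alarm ioctls being moved into uapi/android_alarm.h are driven from
user space roughly as in this sketch (hedged: error handling is trimmed,
and the include path for the exported header is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
/* #include "android_alarm.h" -- user-space copy of the uapi header */

static int wait_for_alarm_in(int seconds)
{
        struct timespec ts = { 0, 0 };
        int fd, fired;

        fd = open("/dev/alarm", O_RDWR);
        if (fd < 0)
                return -1;
        ts.tv_sec = time(NULL) + seconds;       /* absolute wall-clock time */
        ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);
        /* blocks, waking the device if suspended; returns a mask of
         * fired alarm types (see android_alarm_return_flags above) */
        fired = ioctl(fd, ANDROID_ALARM_WAIT);
        close(fd);
        return fired;
}
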
index e681bdd9aa5f46fb31e2a1390c3559387b333bf2..3511b0840362b88c878abcd1d2de885d005f053c 100644 (file)
@@ -224,21 +224,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
 
        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
-               goto out;
+               goto out_unlock;
 
        if (!asma->file) {
                ret = -EBADF;
-               goto out;
+               goto out_unlock;
        }
 
-       ret = asma->file->f_op->read(asma->file, buf, len, pos);
-       if (ret < 0)
-               goto out;
+       mutex_unlock(&ashmem_mutex);
 
-       /** Update backing file pos, since f_ops->read() doesn't */
-       asma->file->f_pos = *pos;
+       /*
+        * asma and asma->file are used outside the lock here.  We assume
+        * once asma->file is set it will never be changed, and will not
+        * be destroyed until all references to the file are dropped and
+        * ashmem_release is called.
+        */
+       ret = asma->file->f_op->read(asma->file, buf, len, pos);
+       if (ret >= 0) {
+               /** Update backing file pos, since f_ops->read() doesn't */
+               asma->file->f_pos = *pos;
+       }
+       return ret;
 
-out:
+out_unlock:
        mutex_unlock(&ashmem_mutex);
        return ret;
 }
@@ -317,22 +325,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
        }
        get_file(asma->file);
 
-       /*
-        * XXX - Reworked to use shmem_zero_setup() instead of
-        * shmem_set_file while we're in staging. -jstultz
-        */
-       if (vma->vm_flags & VM_SHARED) {
-               ret = shmem_zero_setup(vma);
-               if (ret) {
-                       fput(asma->file);
-                       goto out;
-               }
+       if (vma->vm_flags & VM_SHARED)
+               shmem_set_file(vma, asma->file);
+       else {
+               if (vma->vm_file)
+                       fput(vma->vm_file);
+               vma->vm_file = asma->file;
        }
 
-       if (vma->vm_file)
-               fput(vma->vm_file);
-       vma->vm_file = asma->file;
-
 out:
        mutex_unlock(&ashmem_mutex);
        return ret;
@@ -413,6 +413,7 @@ out:
 
 static int set_name(struct ashmem_area *asma, void __user *name)
 {
+       int len;
        int ret = 0;
        char local_name[ASHMEM_NAME_LEN];
 
@@ -425,21 +426,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
         * variable that does not need protection and later copy the local
         * variable to the structure member with lock held.
         */
-       if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
-               return -EFAULT;
-
+       len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+       if (len < 0)
+               return len;
+       if (len == ASHMEM_NAME_LEN)
+               local_name[ASHMEM_NAME_LEN - 1] = '\0';
        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
-       if (unlikely(asma->file)) {
+       if (unlikely(asma->file))
                ret = -EINVAL;
-               goto out;
-       }
-       memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
-               local_name, ASHMEM_NAME_LEN);
-       asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
-       mutex_unlock(&ashmem_mutex);
+       else
+               strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
 
+       mutex_unlock(&ashmem_mutex);
        return ret;
 }
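
set_name() can only succeed before the region is first mapped, because
asma->file is created at mmap time and renaming is rejected afterwards.
That matches the usual user-space sequence, sketched below after what
Android's libcutils ashmem_create_region() does (names and error
handling here are illustrative only):

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
/* #include "ashmem.h" -- user-space copy of the uapi header */

static int ashmem_region(const char *name, size_t size, void **mem)
{
        int fd = open("/dev/ashmem", O_RDWR);

        if (fd < 0)
                return -1;
        /* the name must be set before the first mmap(), see set_name() */
        ioctl(fd, ASHMEM_SET_NAME, name);
        ioctl(fd, ASHMEM_SET_SIZE, size);
        *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (*mem == MAP_FAILED) {
                close(fd);
                return -1;
        }
        return fd;      /* sharing this fd (e.g. over binder) shares the memory */
}
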
 
index 8dc0f0d3adf310b651cf6b17742795ae5269600d..5abcfd7aa706c37ac6c214323429bfbe6dfeff51 100644 (file)
 #include <linux/ioctl.h>
 #include <linux/compat.h>
 
-#define ASHMEM_NAME_LEN                256
-
-#define ASHMEM_NAME_DEF                "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED      0
-#define ASHMEM_WAS_PURGED      1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED     0
-#define ASHMEM_IS_PINNED       1
-
-struct ashmem_pin {
-       __u32 offset;   /* offset into region, in bytes, page-aligned */
-       __u32 len;      /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC            0x77
-
-#define ASHMEM_SET_NAME                _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME                _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE                _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE                _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK   _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK   _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN             _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN           _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS  _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES        _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
 
 /* support of 32bit userspace on 64bit platforms */
 #ifdef CONFIG_COMPAT
index 0fce5fc9923b0fb7b603ac52dd7473b319502363..d4e5290019344f2797ab6c2c892500419b4f4d42 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/fdtable.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
@@ -36,6 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/pid_namespace.h>
+#include <linux/security.h>
 
 #include "binder.h"
 #include "binder_trace.h"
@@ -227,8 +229,8 @@ struct binder_node {
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
-       void __user *ptr;
-       void __user *cookie;
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
        unsigned has_strong_ref:1;
        unsigned pending_strong_ref:1;
        unsigned has_weak_ref:1;
@@ -241,7 +243,7 @@ struct binder_node {
 
 struct binder_ref_death {
        struct binder_work work;
-       void __user *cookie;
+       binder_uintptr_t cookie;
 };
 
 struct binder_ref {
@@ -514,14 +516,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
 }
 
 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-                                                 void __user *user_ptr)
+                                                 uintptr_t user_ptr)
 {
        struct rb_node *n = proc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        struct binder_buffer *kern_ptr;
 
-       kern_ptr = user_ptr - proc->user_buffer_offset
-               - offsetof(struct binder_buffer, data);
+       kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+               - offsetof(struct binder_buffer, data));
 
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -790,7 +792,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
        list_del(&buffer->entry);
        if (free_page_start || free_page_end) {
                binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                            "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n",
+                            "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
                             proc->pid, buffer, free_page_start ? "" : " end",
                             free_page_end ? "" : " start", prev, next);
                binder_update_page_range(proc, 0, free_page_start ?
@@ -855,7 +857,7 @@ static void binder_free_buf(struct binder_proc *proc,
 }
 
 static struct binder_node *binder_get_node(struct binder_proc *proc,
-                                          void __user *ptr)
+                                          binder_uintptr_t ptr)
 {
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;
@@ -874,8 +876,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
 }
 
 static struct binder_node *binder_new_node(struct binder_proc *proc,
-                                          void __user *ptr,
-                                          void __user *cookie)
+                                          binder_uintptr_t ptr,
+                                          binder_uintptr_t cookie)
 {
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
@@ -907,9 +909,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                    "%d:%d node %d u%p c%p created\n",
+                    "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
-                    node->ptr, node->cookie);
+                    (u64)node->ptr, (u64)node->cookie);
        return node;
 }
 
@@ -1225,9 +1227,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                              struct binder_buffer *buffer,
-                                             size_t *failed_at)
+                                             binder_size_t *failed_at)
 {
-       size_t *offp, *off_end;
+       binder_size_t *offp, *off_end;
        int debug_id = buffer->debug_id;
 
        binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1238,7 +1240,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
        if (buffer->target_node)
                binder_dec_node(buffer->target_node, 1, 0);
 
-       offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+       offp = (binder_size_t *)(buffer->data +
+                                ALIGN(buffer->data_size, sizeof(void *)));
        if (failed_at)
                off_end = failed_at;
        else
@@ -1247,9 +1250,9 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                struct flat_binder_object *fp;
                if (*offp > buffer->data_size - sizeof(*fp) ||
                    buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(void *))) {
-                       pr_err("transaction release %d bad offset %zd, size %zd\n",
-                        debug_id, *offp, buffer->data_size);
+                   !IS_ALIGNED(*offp, sizeof(u32))) {
+                       pr_err("transaction release %d bad offset %lld, size %zd\n",
+                              debug_id, (u64)*offp, buffer->data_size);
                        continue;
                }
                fp = (struct flat_binder_object *)(buffer->data + *offp);
@@ -1258,20 +1261,20 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                case BINDER_TYPE_WEAK_BINDER: {
                        struct binder_node *node = binder_get_node(proc, fp->binder);
                        if (node == NULL) {
-                               pr_err("transaction release %d bad node %p\n",
-                                       debug_id, fp->binder);
+                               pr_err("transaction release %d bad node %016llx\n",
+                                      debug_id, (u64)fp->binder);
                                break;
                        }
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        node %d u%p\n",
-                                    node->debug_id, node->ptr);
+                                    "        node %d u%016llx\n",
+                                    node->debug_id, (u64)node->ptr);
                        binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
                        struct binder_ref *ref = binder_get_ref(proc, fp->handle);
                        if (ref == NULL) {
-                               pr_err("transaction release %d bad handle %ld\n",
+                               pr_err("transaction release %d bad handle %d\n",
                                 debug_id, fp->handle);
                                break;
                        }
@@ -1283,13 +1286,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 
                case BINDER_TYPE_FD:
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %ld\n", fp->handle);
+                                    "        fd %d\n", fp->handle);
                        if (failed_at)
                                task_close_fd(proc, fp->handle);
                        break;
 
                default:
-                       pr_err("transaction release %d bad object type %lx\n",
+                       pr_err("transaction release %d bad object type %x\n",
                                debug_id, fp->type);
                        break;
                }
@@ -1302,7 +1305,8 @@ static void binder_transaction(struct binder_proc *proc,
 {
        struct binder_transaction *t;
        struct binder_work *tcomplete;
-       size_t *offp, *off_end;
+       binder_size_t *offp, *off_end;
+       binder_size_t off_min;
        struct binder_proc *target_proc;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
@@ -1382,6 +1386,10 @@ static void binder_transaction(struct binder_proc *proc,
                        return_error = BR_DEAD_REPLY;
                        goto err_dead_binder;
                }
+               if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
+                       return_error = BR_FAILED_REPLY;
+                       goto err_invalid_target_handle;
+               }
                if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
                        struct binder_transaction *tmp;
                        tmp = thread->transaction_stack;
@@ -1431,18 +1439,20 @@ static void binder_transaction(struct binder_proc *proc,
 
        if (reply)
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
+                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_thread->pid,
-                            tr->data.ptr.buffer, tr->data.ptr.offsets,
-                            tr->data_size, tr->offsets_size);
+                            (u64)tr->data.ptr.buffer,
+                            (u64)tr->data.ptr.offsets,
+                            (u64)tr->data_size, (u64)tr->offsets_size);
        else
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
+                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_node->debug_id,
-                            tr->data.ptr.buffer, tr->data.ptr.offsets,
-                            tr->data_size, tr->offsets_size);
+                            (u64)tr->data.ptr.buffer,
+                            (u64)tr->data.ptr.offsets,
+                            (u64)tr->data_size, (u64)tr->offsets_size);
 
        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
@@ -1471,38 +1481,47 @@ static void binder_transaction(struct binder_proc *proc,
        if (target_node)
                binder_inc_node(target_node, 1, 0, NULL);
 
-       offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+       offp = (binder_size_t *)(t->buffer->data +
+                                ALIGN(tr->data_size, sizeof(void *)));
 
-       if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+       if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+                          tr->data.ptr.buffer, tr->data_size)) {
                binder_user_error("%d:%d got transaction with invalid data ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_copy_data_failed;
        }
-       if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+       if (copy_from_user(offp, (const void __user *)(uintptr_t)
+                          tr->data.ptr.offsets, tr->offsets_size)) {
                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_copy_data_failed;
        }
-       if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
-               binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
-                               proc->pid, thread->pid, tr->offsets_size);
+       if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+               binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+                               proc->pid, thread->pid, (u64)tr->offsets_size);
                return_error = BR_FAILED_REPLY;
                goto err_bad_offset;
        }
        off_end = (void *)offp + tr->offsets_size;
+       off_min = 0;
        for (; offp < off_end; offp++) {
                struct flat_binder_object *fp;
                if (*offp > t->buffer->data_size - sizeof(*fp) ||
+                   *offp < off_min ||
                    t->buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(void *))) {
-                       binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
-                                       proc->pid, thread->pid, *offp);
+                   !IS_ALIGNED(*offp, sizeof(u32))) {
+                       binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+                                         proc->pid, thread->pid, (u64)*offp,
+                                         (u64)off_min,
+                                         (u64)(t->buffer->data_size -
+                                         sizeof(*fp)));
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_offset;
                }
                fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+               off_min = *offp + sizeof(struct flat_binder_object);
                switch (fp->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
@@ -1518,10 +1537,14 @@ static void binder_transaction(struct binder_proc *proc,
                                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
                        }
                        if (fp->cookie != node->cookie) {
-                               binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
+                               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
                                        proc->pid, thread->pid,
-                                       fp->binder, node->debug_id,
-                                       fp->cookie, node->cookie);
+                                       (u64)fp->binder, node->debug_id,
+                                       (u64)fp->cookie, (u64)node->cookie);
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+                               return_error = BR_FAILED_REPLY;
                                goto err_binder_get_ref_for_node_failed;
                        }
                        ref = binder_get_ref_for_node(target_proc, node);
@@ -1539,20 +1562,24 @@ static void binder_transaction(struct binder_proc *proc,
 
                        trace_binder_transaction_node_to_ref(t, node, ref);
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        node %d u%p -> ref %d desc %d\n",
-                                    node->debug_id, node->ptr, ref->debug_id,
-                                    ref->desc);
+                                    "        node %d u%016llx -> ref %d desc %d\n",
+                                    node->debug_id, (u64)node->ptr,
+                                    ref->debug_id, ref->desc);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
                        struct binder_ref *ref = binder_get_ref(proc, fp->handle);
                        if (ref == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
+                               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
                                                proc->pid,
                                                thread->pid, fp->handle);
                                return_error = BR_FAILED_REPLY;
                                goto err_binder_get_ref_failed;
                        }
+                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_failed;
+                       }
                        if (ref->node->proc == target_proc) {
                                if (fp->type == BINDER_TYPE_HANDLE)
                                        fp->type = BINDER_TYPE_BINDER;
@@ -1563,9 +1590,9 @@ static void binder_transaction(struct binder_proc *proc,
                                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                                trace_binder_transaction_ref_to_node(t, ref);
                                binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> node %d u%p\n",
+                                            "        ref %d desc %d -> node %d u%016llx\n",
                                             ref->debug_id, ref->desc, ref->node->debug_id,
-                                            ref->node->ptr);
+                                            (u64)ref->node->ptr);
                        } else {
                                struct binder_ref *new_ref;
                                new_ref = binder_get_ref_for_node(target_proc, ref->node);
@@ -1590,13 +1617,13 @@ static void binder_transaction(struct binder_proc *proc,
 
                        if (reply) {
                                if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
-                                       binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
+                                       binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
                                                proc->pid, thread->pid, fp->handle);
                                        return_error = BR_FAILED_REPLY;
                                        goto err_fd_not_allowed;
                                }
                        } else if (!target_node->accept_fds) {
-                               binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
+                               binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
                                        proc->pid, thread->pid, fp->handle);
                                return_error = BR_FAILED_REPLY;
                                goto err_fd_not_allowed;
@@ -1604,11 +1631,16 @@ static void binder_transaction(struct binder_proc *proc,
 
                        file = fget(fp->handle);
                        if (file == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
+                               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
                                        proc->pid, thread->pid, fp->handle);
                                return_error = BR_FAILED_REPLY;
                                goto err_fget_failed;
                        }
+                       if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
+                               fput(file);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_get_unused_fd_failed;
+                       }
                        target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
                        if (target_fd < 0) {
                                fput(file);
@@ -1618,13 +1650,13 @@ static void binder_transaction(struct binder_proc *proc,
                        task_fd_install(target_proc, target_fd, file);
                        trace_binder_transaction_fd(t, fp->handle, target_fd);
                        binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %ld -> %d\n", fp->handle, target_fd);
+                                    "        fd %d -> %d\n", fp->handle, target_fd);
                        /* TODO: fput? */
                        fp->handle = target_fd;
                } break;
 
                default:
-                       binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
+                       binder_user_error("%d:%d got transaction with invalid object type, %x\n",
                                proc->pid, thread->pid, fp->type);
                        return_error = BR_FAILED_REPLY;
                        goto err_bad_object_type;
@@ -1681,9 +1713,9 @@ err_dead_binder:
 err_invalid_target_handle:
 err_no_context_mgr_node:
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-                    "%d:%d transaction failed %d, size %zd-%zd\n",
+                    "%d:%d transaction failed %d, size %lld-%lld\n",
                     proc->pid, thread->pid, return_error,
-                    tr->data_size, tr->offsets_size);
+                    (u64)tr->data_size, (u64)tr->offsets_size);
 
        {
                struct binder_transaction_log_entry *fe;
@@ -1700,9 +1732,11 @@ err_no_context_mgr_node:
 }
 
 int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
-                       void __user *buffer, int size, signed long *consumed)
+                       binder_uintptr_t binder_buffer, size_t size,
+                       binder_size_t *consumed)
 {
        uint32_t cmd;
+       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
 
@@ -1771,33 +1805,33 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                }
                case BC_INCREFS_DONE:
                case BC_ACQUIRE_DONE: {
-                       void __user *node_ptr;
-                       void *cookie;
+                       binder_uintptr_t node_ptr;
+                       binder_uintptr_t cookie;
                        struct binder_node *node;
 
-                       if (get_user(node_ptr, (void * __user *)ptr))
+                       if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
-                       ptr += sizeof(void *);
-                       if (get_user(cookie, (void * __user *)ptr))
+                       ptr += sizeof(binder_uintptr_t);
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
-                       ptr += sizeof(void *);
+                       ptr += sizeof(binder_uintptr_t);
                        node = binder_get_node(proc, node_ptr);
                        if (node == NULL) {
-                               binder_user_error("%d:%d %s u%p no match\n",
+                               binder_user_error("%d:%d %s u%016llx no match\n",
                                        proc->pid, thread->pid,
                                        cmd == BC_INCREFS_DONE ?
                                        "BC_INCREFS_DONE" :
                                        "BC_ACQUIRE_DONE",
-                                       node_ptr);
+                                       (u64)node_ptr);
                                break;
                        }
                        if (cookie != node->cookie) {
-                               binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
+                               binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
                                        proc->pid, thread->pid,
                                        cmd == BC_INCREFS_DONE ?
                                        "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-                                       node_ptr, node->debug_id,
-                                       cookie, node->cookie);
+                                       (u64)node_ptr, node->debug_id,
+                                       (u64)cookie, (u64)node->cookie);
                                break;
                        }
                        if (cmd == BC_ACQUIRE_DONE) {
@@ -1833,27 +1867,27 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        return -EINVAL;
 
                case BC_FREE_BUFFER: {
-                       void __user *data_ptr;
+                       binder_uintptr_t data_ptr;
                        struct binder_buffer *buffer;
 
-                       if (get_user(data_ptr, (void * __user *)ptr))
+                       if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
-                       ptr += sizeof(void *);
+                       ptr += sizeof(binder_uintptr_t);
 
                        buffer = binder_buffer_lookup(proc, data_ptr);
                        if (buffer == NULL) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
-                                       proc->pid, thread->pid, data_ptr);
+                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+                                       proc->pid, thread->pid, (u64)data_ptr);
                                break;
                        }
                        if (!buffer->allow_user_free) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
-                                       proc->pid, thread->pid, data_ptr);
+                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+                                       proc->pid, thread->pid, (u64)data_ptr);
                                break;
                        }
                        binder_debug(BINDER_DEBUG_FREE_BUFFER,
-                                    "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
-                                    proc->pid, thread->pid, data_ptr, buffer->debug_id,
+                                    "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+                                    proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id,
                                     buffer->transaction ? "active" : "finished");
 
                        if (buffer->transaction) {
@@ -1923,16 +1957,16 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                case BC_REQUEST_DEATH_NOTIFICATION:
                case BC_CLEAR_DEATH_NOTIFICATION: {
                        uint32_t target;
-                       void __user *cookie;
+                       binder_uintptr_t cookie;
                        struct binder_ref *ref;
                        struct binder_ref_death *death;
 
                        if (get_user(target, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (get_user(cookie, (void __user * __user *)ptr))
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
-                       ptr += sizeof(void *);
+                       ptr += sizeof(binder_uintptr_t);
                        ref = binder_get_ref(proc, target);
                        if (ref == NULL) {
                                binder_user_error("%d:%d %s invalid ref %d\n",
@@ -1945,12 +1979,12 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                        }
 
                        binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-                                    "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+                                    "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
                                     proc->pid, thread->pid,
                                     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
                                     "BC_REQUEST_DEATH_NOTIFICATION" :
                                     "BC_CLEAR_DEATH_NOTIFICATION",
-                                    cookie, ref->debug_id, ref->desc,
+                                    (u64)cookie, ref->debug_id, ref->desc,
                                     ref->strong, ref->weak, ref->node->debug_id);
 
                        if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
@@ -1988,9 +2022,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                                }
                                death = ref->death;
                                if (death->cookie != cookie) {
-                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
+                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
                                                proc->pid, thread->pid,
-                                               death->cookie, cookie);
+                                               (u64)death->cookie, (u64)cookie);
                                        break;
                                }
                                ref->death = NULL;
@@ -2010,9 +2044,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                } break;
                case BC_DEAD_BINDER_DONE: {
                        struct binder_work *w;
-                       void __user *cookie;
+                       binder_uintptr_t cookie;
                        struct binder_ref_death *death = NULL;
-                       if (get_user(cookie, (void __user * __user *)ptr))
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
 
                        ptr += sizeof(void *);
@@ -2024,11 +2058,11 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
                                }
                        }
                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
-                                    "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
-                                    proc->pid, thread->pid, cookie, death);
+                                    "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+                                    proc->pid, thread->pid, (u64)cookie, death);
                        if (death == NULL) {
-                               binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
-                                       proc->pid, thread->pid, cookie);
+                               binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+                                       proc->pid, thread->pid, (u64)cookie);
                                break;
                        }
 
@@ -2080,9 +2114,10 @@ static int binder_has_thread_work(struct binder_thread *thread)
 
 static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
-                             void  __user *buffer, int size,
-                             signed long *consumed, int non_block)
+                             binder_uintptr_t binder_buffer, size_t size,
+                             binder_size_t *consumed, int non_block)
 {
+       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
        void __user *ptr = buffer + *consumed;
        void __user *end = buffer + size;
 
@@ -2140,13 +2175,13 @@ retry:
                        if (!binder_has_proc_work(proc, thread))
                                ret = -EAGAIN;
                } else
-                       ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+                       ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
        } else {
                if (non_block) {
                        if (!binder_has_thread_work(thread))
                                ret = -EAGAIN;
                } else
-                       ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+                       ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
        }
 
        binder_lock(__func__);
@@ -2227,32 +2262,36 @@ retry:
                                if (put_user(cmd, (uint32_t __user *)ptr))
                                        return -EFAULT;
                                ptr += sizeof(uint32_t);
-                               if (put_user(node->ptr, (void * __user *)ptr))
+                               if (put_user(node->ptr,
+                                            (binder_uintptr_t __user *)ptr))
                                        return -EFAULT;
-                               ptr += sizeof(void *);
-                               if (put_user(node->cookie, (void * __user *)ptr))
+                               ptr += sizeof(binder_uintptr_t);
+                               if (put_user(node->cookie,
+                                            (binder_uintptr_t __user *)ptr))
                                        return -EFAULT;
-                               ptr += sizeof(void *);
+                               ptr += sizeof(binder_uintptr_t);
 
                                binder_stat_br(proc, thread, cmd);
                                binder_debug(BINDER_DEBUG_USER_REFS,
-                                            "%d:%d %s %d u%p c%p\n",
-                                            proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+                                            "%d:%d %s %d u%016llx c%016llx\n",
+                                            proc->pid, thread->pid, cmd_name,
+                                            node->debug_id,
+                                            (u64)node->ptr, (u64)node->cookie);
                        } else {
                                list_del_init(&w->entry);
                                if (!weak && !strong) {
                                        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                                                    "%d:%d node %d u%p c%p deleted\n",
+                                                    "%d:%d node %d u%016llx c%016llx deleted\n",
                                                     proc->pid, thread->pid, node->debug_id,
-                                                    node->ptr, node->cookie);
+                                                    (u64)node->ptr, (u64)node->cookie);
                                        rb_erase(&node->rb_node, &proc->nodes);
                                        kfree(node);
                                        binder_stats_deleted(BINDER_STAT_NODE);
                                } else {
                                        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                                                    "%d:%d node %d u%p c%p state unchanged\n",
-                                                    proc->pid, thread->pid, node->debug_id, node->ptr,
-                                                    node->cookie);
+                                                    "%d:%d node %d u%016llx c%016llx state unchanged\n",
+                                                    proc->pid, thread->pid, node->debug_id,
+                                                    (u64)node->ptr, (u64)node->cookie);
                                }
                        }
                } break;
@@ -2270,17 +2309,18 @@ retry:
                        if (put_user(cmd, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (put_user(death->cookie, (void * __user *)ptr))
+                       if (put_user(death->cookie,
+                                    (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
-                       ptr += sizeof(void *);
+                       ptr += sizeof(binder_uintptr_t);
                        binder_stat_br(proc, thread, cmd);
                        binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-                                    "%d:%d %s %p\n",
+                                    "%d:%d %s %016llx\n",
                                      proc->pid, thread->pid,
                                      cmd == BR_DEAD_BINDER ?
                                      "BR_DEAD_BINDER" :
                                      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-                                     death->cookie);
+                                     (u64)death->cookie);
 
                        if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                                list_del(&w->entry);
@@ -2310,8 +2350,8 @@ retry:
                                binder_set_nice(target_node->min_priority);
                        cmd = BR_TRANSACTION;
                } else {
-                       tr.target.ptr = NULL;
-                       tr.cookie = NULL;
+                       tr.target.ptr = 0;
+                       tr.cookie = 0;
                        cmd = BR_REPLY;
                }
                tr.code = t->code;
@@ -2328,8 +2368,9 @@ retry:
 
                tr.data_size = t->buffer->data_size;
                tr.offsets_size = t->buffer->offsets_size;
-               tr.data.ptr.buffer = (void *)t->buffer->data +
-                                       proc->user_buffer_offset;
+               tr.data.ptr.buffer = (binder_uintptr_t)(
+                                       (uintptr_t)t->buffer->data +
+                                       proc->user_buffer_offset);
                tr.data.ptr.offsets = tr.data.ptr.buffer +
                                        ALIGN(t->buffer->data_size,
                                            sizeof(void *));
@@ -2344,14 +2385,14 @@ retry:
                trace_binder_transaction_received(t);
                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
+                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
                             proc->pid, thread->pid,
                             (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                             "BR_REPLY",
                             t->debug_id, t->from ? t->from->proc->pid : 0,
                             t->from ? t->from->pid : 0, cmd,
                             t->buffer->data_size, t->buffer->offsets_size,
-                            tr.data.ptr.buffer, tr.data.ptr.offsets);
+                            (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
 
                list_del(&t->work.entry);
                t->buffer->allow_user_free = 1;
@@ -2421,8 +2462,8 @@ static void binder_release_work(struct list_head *list)
 
                        death = container_of(w, struct binder_ref_death, work);
                        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                               "undelivered death notification, %p\n",
-                               death->cookie);
+                               "undelivered death notification, %016llx\n",
+                               (u64)death->cookie);
                        kfree(death);
                        binder_stats_deleted(BINDER_STAT_DEATH);
                } break;
@@ -2578,12 +2619,13 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        goto err;
                }
                binder_debug(BINDER_DEBUG_READ_WRITE,
-                            "%d:%d write %ld at %08lx, read %ld at %08lx\n",
-                            proc->pid, thread->pid, bwr.write_size,
-                            bwr.write_buffer, bwr.read_size, bwr.read_buffer);
+                            "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+                            proc->pid, thread->pid,
+                            (u64)bwr.write_size, (u64)bwr.write_buffer,
+                            (u64)bwr.read_size, (u64)bwr.read_buffer);
 
                if (bwr.write_size > 0) {
-                       ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+                       ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
                        trace_binder_write_done(ret);
                        if (ret < 0) {
                                bwr.read_consumed = 0;
@@ -2593,7 +2635,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        }
                }
                if (bwr.read_size > 0) {
-                       ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+                       ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
                        trace_binder_read_done(ret);
                        if (!list_empty(&proc->todo))
                                wake_up_interruptible(&proc->wait);
@@ -2604,9 +2646,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        }
                }
                binder_debug(BINDER_DEBUG_READ_WRITE,
-                            "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
-                            proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
-                            bwr.read_consumed, bwr.read_size);
+                            "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+                            proc->pid, thread->pid,
+                            (u64)bwr.write_consumed, (u64)bwr.write_size,
+                            (u64)bwr.read_consumed, (u64)bwr.read_size);
                if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
                        ret = -EFAULT;
                        goto err;
@@ -2625,6 +2668,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        ret = -EBUSY;
                        goto err;
                }
+               ret = security_binder_set_context_mgr(proc->tsk);
+               if (ret < 0)
+                       goto err;
                if (uid_valid(binder_context_mgr_uid)) {
                        if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
                                pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
@@ -2635,7 +2681,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        }
                } else
                        binder_context_mgr_uid = current->cred->euid;
-               binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+               binder_context_mgr_node = binder_new_node(proc, 0, 0);
                if (binder_context_mgr_node == NULL) {
                        ret = -ENOMEM;
                        goto err;
@@ -3130,8 +3176,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
                break;
        case BINDER_WORK_NODE:
                node = container_of(w, struct binder_node, work);
-               seq_printf(m, "%snode work %d: u%p c%p\n",
-                          prefix, node->debug_id, node->ptr, node->cookie);
+               seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+                          prefix, node->debug_id,
+                          (u64)node->ptr, (u64)node->cookie);
                break;
        case BINDER_WORK_DEAD_BINDER:
                seq_printf(m, "%shas dead binder\n", prefix);
@@ -3191,8 +3238,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
        hlist_for_each_entry(ref, &node->refs, node_entry)
                count++;
 
-       seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
-                  node->debug_id, node->ptr, node->cookie,
+       seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+                  node->debug_id, (u64)node->ptr, (u64)node->cookie,
                   node->has_strong_ref, node->has_weak_ref,
                   node->local_strong_refs, node->local_weak_refs,
                   node->internal_strong_refs, count);
@@ -3494,6 +3541,7 @@ static const struct file_operations binder_fops = {
        .owner = THIS_MODULE,
        .poll = binder_poll,
        .unlocked_ioctl = binder_ioctl,
+       .compat_ioctl = binder_ioctl,
        .mmap = binder_mmap,
        .open = binder_open,
        .flush = binder_flush,
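
The new .compat_ioctl = binder_ioctl line works only because the
conversion above removes every pointer-sized field from the wire
protocol: with fixed-width types, the structure layouts are identical
for 32-bit and 64-bit user space, so no compat translation layer is
needed.  A sketch of what the uapi header introduced by this series pins
down (paraphrased from the upstream uapi binder header; treat the exact
details as an assumption here):

#include <linux/types.h>

#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif

/*
 * All members are fixed-width, so sizeof() and every field offset
 * agree between 32-bit and 64-bit callers of BINDER_WRITE_READ.
 */
struct binder_write_read {
        binder_size_t write_size;       /* bytes to write */
        binder_size_t write_consumed;   /* bytes consumed by driver */
        binder_uintptr_t write_buffer;
        binder_size_t read_size;        /* bytes to read */
        binder_size_t read_consumed;    /* bytes consumed by driver */
        binder_uintptr_t read_buffer;
};

CONFIG_ANDROID_BINDER_IPC_32BIT (added in the Kconfig hunk earlier)
defines BINDER_IPC_32BIT, so an old 32-bit-only user space keeps its
historical 32-bit ABI.
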
index dbe81ceca1bdd3615373f44c25267cd385646946..eb0834656dfe2fa8810ef7db4444516030a27e11 100644 (file)
 #ifndef _LINUX_BINDER_H
 #define _LINUX_BINDER_H
 
-#include <linux/ioctl.h>
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
 
-#define B_PACK_CHARS(c1, c2, c3, c4) \
-       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
-       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
-       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
-       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes.  The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur.  The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
-       /* 8 bytes for large_flat_header. */
-       unsigned long           type;
-       unsigned long           flags;
-
-       /* 8 bytes of data. */
-       union {
-               void __user     *binder;        /* local object */
-               signed long     handle;         /* remote object */
-       };
-
-       /* extra data associated with local object */
-       void __user             *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
-       signed long     write_size;     /* bytes to write */
-       signed long     write_consumed; /* bytes consumed by driver */
-       unsigned long   write_buffer;
-       signed long     read_size;      /* bytes to read */
-       signed long     read_consumed;  /* bytes consumed by driver */
-       unsigned long   read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
-       /* driver protocol version -- increment with incompatible change */
-       signed long     protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
-#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
-#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, size_t)
-#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
-#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
-#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
-#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted.  This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process.  That is, the process is being destroyed.
- * You should handle this by exiting from your process.  Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
-       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
-       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
-       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
-       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
-       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
-        * identifying the target and contents of the transaction.
-        */
-       union {
-               size_t  handle; /* target descriptor of command transaction */
-               void    *ptr;   /* target descriptor of return transaction */
-       } target;
-       void            *cookie;        /* target object cookie */
-       unsigned int    code;           /* transaction command */
-
-       /* General information about the transaction. */
-       unsigned int    flags;
-       pid_t           sender_pid;
-       uid_t           sender_euid;
-       size_t          data_size;      /* number of bytes of data */
-       size_t          offsets_size;   /* number of bytes of offsets */
-
-       /* If this transaction is inline, the data immediately
-        * follows here; otherwise, it ends with a pointer to
-        * the data buffer.
-        */
-       union {
-               struct {
-                       /* transaction data */
-                       const void __user       *buffer;
-                       /* offsets from buffer to flat_binder_object structs */
-                       const void __user       *offsets;
-               } ptr;
-               uint8_t buf[8];
-       } data;
-};
-
-struct binder_ptr_cookie {
-       void *ptr;
-       void *cookie;
-};
-
-struct binder_pri_desc {
-       int priority;
-       int desc;
-};
-
-struct binder_pri_ptr_cookie {
-       int priority;
-       void *ptr;
-       void *cookie;
-};
-
-enum binder_driver_return_protocol {
-       BR_ERROR = _IOR('r', 0, int),
-       /*
-        * int: error code
-        */
-
-       BR_OK = _IO('r', 1),
-       /* No parameters! */
-
-       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
-       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the received command.
-        */
-
-       BR_ACQUIRE_RESULT = _IOR('r', 4, int),
-       /*
-        * not currently supported
-        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
-        * Else the remote object has acquired a primary reference.
-        */
-
-       BR_DEAD_REPLY = _IO('r', 5),
-       /*
-        * The target of the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
-        */
-
-       BR_TRANSACTION_COMPLETE = _IO('r', 6),
-       /*
-        * No parameters... always refers to the last transaction requested
-        * (including replies).  Note that this will be sent even for
-        * asynchronous transactions.
-        */
-
-       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
-       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
-       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
-       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
-       /*
-        * void *:      ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
-       /*
-        * not currently supported
-        * int: priority
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_NOOP = _IO('r', 12),
-       /*
-        * No parameters.  Do nothing and examine the next command.  It exists
-        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
-        */
-
-       BR_SPAWN_LOOPER = _IO('r', 13),
-       /*
-        * No parameters.  The driver has determined that a process has no
-        * threads waiting to service incoming transactions.  When a process
-        * receives this command, it must spawn a new service thread and
-        * register it via bcENTER_LOOPER.
-        */
-
-       BR_FINISHED = _IO('r', 14),
-       /*
-        * not currently supported
-        * stop threadpool thread
-        */
-
-       BR_DEAD_BINDER = _IOR('r', 15, void *),
-       /*
-        * void *: cookie
-        */
-       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
-       /*
-        * void *: cookie
-        */
-
-       BR_FAILED_REPLY = _IO('r', 17),
-       /*
-        * The the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
-        */
-};
-
-enum binder_driver_command_protocol {
-       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
-       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the sent command.
-        */
-
-       BC_ACQUIRE_RESULT = _IOW('c', 2, int),
-       /*
-        * not currently supported
-        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
-        * Else you have acquired a primary reference on the object.
-        */
-
-       BC_FREE_BUFFER = _IOW('c', 3, int),
-       /*
-        * void *: ptr to transaction data received on a read
-        */
-
-       BC_INCREFS = _IOW('c', 4, int),
-       BC_ACQUIRE = _IOW('c', 5, int),
-       BC_RELEASE = _IOW('c', 6, int),
-       BC_DECREFS = _IOW('c', 7, int),
-       /*
-        * int: descriptor
-        */
-
-       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
-       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
-       /*
-        * not currently supported
-        * int: priority
-        * int: descriptor
-        */
-
-       BC_REGISTER_LOOPER = _IO('c', 11),
-       /*
-        * No parameters.
-        * Register a spawned looper thread with the device.
-        */
-
-       BC_ENTER_LOOPER = _IO('c', 12),
-       BC_EXIT_LOOPER = _IO('c', 13),
-       /*
-        * No parameters.
-        * These two commands are sent as an application-level thread
-        * enters and exits the binder loop, respectively.  They are
-        * used so the binder can have an accurate count of the number
-        * of looping threads it has available.
-        */
-
-       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie
-        */
-
-       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie
-        */
-
-       BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
-       /*
-        * void *: cookie
-        */
-};
+#include "uapi/binder.h"
 
 #endif /* _LINUX_BINDER_H */
 
index 82a567c2af6797bdfb5e004adffa51c447b53645..7f20f3dc83690cad36cfa82c2a946384a8c30bb8 100644 (file)
@@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
        TP_STRUCT__entry(
                __field(int, debug_id)
                __field(int, node_debug_id)
-               __field(void __user *, node_ptr)
+               __field(binder_uintptr_t, node_ptr)
                __field(int, ref_debug_id)
                __field(uint32_t, ref_desc)
        ),
@@ -163,8 +163,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
                __entry->ref_debug_id = ref->debug_id;
                __entry->ref_desc = ref->desc;
        ),
-       TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
-                 __entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
+       TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+                 __entry->debug_id, __entry->node_debug_id,
+                 (u64)__entry->node_ptr,
                  __entry->ref_debug_id, __entry->ref_desc)
 );
 
@@ -177,7 +178,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
                __field(int, ref_debug_id)
                __field(uint32_t, ref_desc)
                __field(int, node_debug_id)
-               __field(void __user *, node_ptr)
+               __field(binder_uintptr_t, node_ptr)
        ),
        TP_fast_assign(
                __entry->debug_id = t->debug_id;
@@ -186,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
                __entry->node_debug_id = ref->node->debug_id;
                __entry->node_ptr = ref->node->ptr;
        ),
-       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
+       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
                  __entry->debug_id, __entry->node_debug_id,
-                 __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
+                 __entry->ref_debug_id, __entry->ref_desc,
+                 (u64)__entry->node_ptr)
 );
 
 TRACE_EVENT(binder_transaction_ref_to_ref,
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644 (file)
index 0000000..56f7f99
--- /dev/null
@@ -0,0 +1,49 @@
+config FIQ_DEBUGGER
+       bool "FIQ Mode Serial Debugger"
+       default n
+       depends on ARM || ARM64
+       help
+         The FIQ serial debugger can accept commands even when the
+         kernel is unresponsive due to being stuck with interrupts
+         disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+       bool "Keep serial debugger active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables the serial debugger at boot. Passing
+         fiq_debugger.no_sleep on the kernel commandline will
+         override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+       bool "Don't disable wakeup IRQ when debugger is active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Don't disable the wakeup irq when enabling the uart clock.  This will
+         cause extra interrupts, but it makes the serial debugger usable
+         on some MSM radio builds that ignore the uart clock request in power
+         collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+       bool "Console on FIQ Serial Debugger port"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables a console so that printk messages are displayed on
+         the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+       bool "Put the FIQ debugger into console mode by default"
+       depends on FIQ_DEBUGGER_CONSOLE
+       default n
+       help
+         If enabled, this puts the fiq debugger into console mode by default.
+         Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_WATCHDOG
+       bool
+       select FIQ_DEBUGGER
+       select PSTORE_RAM
+       default n
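+
+# Illustrative fragment only: a debug build might set, for example,
+#   CONFIG_FIQ_DEBUGGER=y
+#   CONFIG_FIQ_DEBUGGER_CONSOLE=y
+#   CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE=y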
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644 (file)
index 0000000..a7ca487
--- /dev/null
@@ -0,0 +1,4 @@
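+# fiq_debugger.o is always built from here; the parent Makefile is
+# expected to enter this directory only when CONFIG_FIQ_DEBUGGER is set.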
+obj-y                  += fiq_debugger.o
+obj-$(CONFIG_ARM)      += fiq_debugger_arm.o
+obj-$(CONFIG_ARM64)    += fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG)     += fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644 (file)
index 0000000..7d6b4ae
--- /dev/null
@@ -0,0 +1,1212 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#ifdef CONFIG_FIQ_GLUE
+#include <asm/fiq_glue.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger.h"
+#include "fiq_debugger_priv.h"
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+struct fiq_debugger_state {
+#ifdef CONFIG_FIQ_GLUE
+       struct fiq_glue_handler handler;
+#endif
+       struct fiq_debugger_output output;
+
+       int fiq;
+       int uart_irq;
+       int signal_irq;
+       int wakeup_irq;
+       bool wakeup_irq_no_set_wake;
+       struct clk *clk;
+       struct fiq_debugger_pdata *pdata;
+       struct platform_device *pdev;
+
+       char debug_cmd[DEBUG_MAX];
+       int debug_busy;
+       int debug_abort;
+
+       char debug_buf[DEBUG_MAX];
+       int debug_count;
+
+       bool no_sleep;
+       bool debug_enable;
+       bool ignore_next_wakeup_irq;
+       struct timer_list sleep_timer;
+       spinlock_t sleep_timer_lock;
+       bool uart_enabled;
+       struct wake_lock debugger_wake_lock;
+       bool console_enable;
+       int current_cpu;
+       atomic_t unhandled_fiq_count;
+       bool in_fiq;
+
+       struct work_struct work;
+       spinlock_t work_lock;
+       char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+       spinlock_t console_lock;
+       struct console console;
+       struct tty_port tty_port;
+       struct fiq_debugger_ringbuf *tty_rbuf;
+       bool syslog_dumping;
+#endif
+
+       unsigned int last_irqs[NR_IRQS];
+       unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
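+
+/*
+ * Because this driver is built in, the knobs above surface as kernel
+ * command line parameters, e.g.
+ * "fiq_debugger.no_sleep=1 fiq_debugger.console_enable=1".
+ */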
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       enable_irq(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               enable_irq_wake(state->wakeup_irq);
+}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       disable_irq_nosync(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
+{
+       return (state->fiq >= 0);
+}
+
+#ifdef CONFIG_FIQ_GLUE
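+/* prefer the platform's force_irq hook; otherwise fall back to the irq
+ * chip's retrigger callback to fire the signal irq */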
+static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
+{
+       unsigned int irq = state->signal_irq;
+
+       if (WARN_ON(!fiq_debugger_have_fiq(state)))
+               return;
+       if (state->pdata->force_irq) {
+               state->pdata->force_irq(state->pdev, irq);
+       } else {
+               struct irq_chip *chip = irq_get_chip(irq);
+               if (chip && chip->irq_retrigger)
+                       chip->irq_retrigger(irq_get_irq_data(irq));
+       }
+}
+#endif
+
+static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
+{
+       if (state->clk)
+               clk_enable(state->clk);
+       if (state->pdata->uart_enable)
+               state->pdata->uart_enable(state->pdev);
+}
+
+static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_disable)
+               state->pdata->uart_disable(state->pdev);
+       if (state->clk)
+               clk_disable(state->clk);
+}
+
+static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_flush)
+               state->pdata->uart_flush(state->pdev);
+}
+
+static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
+{
+       state->pdata->uart_putc(state->pdev, c);
+}
+
+static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
+{
+       unsigned c;
+       while ((c = *s++)) {
+               if (c == '\n')
+                       fiq_debugger_putc(state, '\r');
+               fiq_debugger_putc(state, c);
+       }
+}
+
+static void fiq_debugger_prompt(struct fiq_debugger_state *state)
+{
+       fiq_debugger_puts(state, "debug> ");
+}
+
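+/*
+ * May be called from FIQ context, where the kmsg locks can already be
+ * held by the interrupted cpu, hence the _nolock dump variants.
+ */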
+static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
+{
+       char buf[512];
+       size_t len;
+       struct kmsg_dumper dumper = { .active = true };
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+                                        sizeof(buf) - 1, &len)) {
+               buf[len] = 0;
+               fiq_debugger_puts(state, buf);
+       }
+}
+
+static void fiq_debugger_printf(struct fiq_debugger_output *output,
+                              const char *fmt, ...)
+{
+       struct fiq_debugger_state *state;
+       char buf[256];
+       va_list ap;
+
+       state = container_of(output, struct fiq_debugger_state, output);
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       fiq_debugger_puts(state, buf);
+}
+
+/* Safe outside fiq context */
+static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+       unsigned long irq_flags;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       local_irq_save(irq_flags);
+       fiq_debugger_puts(state, buf);
+       fiq_debugger_uart_flush(state);
+       local_irq_restore(irq_flags);
+       return state->debug_abort;
+}
+
+static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
+{
+       int n;
+       struct irq_desc *desc;
+
+       fiq_debugger_printf(&state->output,
+                       "irqnr       total  since-last   status  name\n");
+       for_each_irq_desc(n, desc) {
+               struct irqaction *act = desc->action;
+               if (!act && !kstat_irqs(n))
+                       continue;
+               fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x  %s\n", n,
+                       kstat_irqs(n),
+                       kstat_irqs(n) - state->last_irqs[n],
+                       desc->status_use_accessors,
+                       (act && act->name) ? act->name : "???");
+               state->last_irqs[n] = kstat_irqs(n);
+       }
+}
+
+static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
+{
+       struct task_struct *g;
+       struct task_struct *p;
+       unsigned task_state;
+       static const char stat_nam[] = "RSDTtZX";
+
+       fiq_debugger_printf(&state->output, "pid   ppid  prio task            pc\n");
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               task_state = p->state ? __ffs(p->state) + 1 : 0;
+               fiq_debugger_printf(&state->output,
+                            "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+               fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+                            task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+               if (task_state == TASK_RUNNING)
+                       fiq_debugger_printf(&state->output, " running\n");
+               else
+                       fiq_debugger_printf(&state->output, " %08lx\n",
+                                       thread_saved_pc(p));
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+}
+
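+/*
+ * sysrq output must reach the debug uart: with the fiq console built in,
+ * console_write() passes printk output through while syslog_dumping is
+ * set; without it, the syslog is cleared before the sysrq and replayed
+ * over the uart afterwards.
+ */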
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+       fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+       if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+               fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+               return;
+       }
+       fiq_debugger_begin_syslog_dump(state);
+       handle_sysrq(rq);
+       fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+       if (!fiq_kgdb_enable) {
+               fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
+               return;
+       }
+
+       fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
+       state->console_enable = true;
+       handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+               char *cmd)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->work_lock, flags);
+       if (state->work_cmd[0] != '\0') {
+               fiq_debugger_printf(&state->output, "work command processor busy\n");
+               spin_unlock_irqrestore(&state->work_lock, flags);
+               return;
+       }
+
+       strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       schedule_work(&state->work);
+}
+
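+/*
+ * Worker for "reboot <cmd>": kernel_restart() can sleep, so the command
+ * is deferred here instead of running in irq context.
+ */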
+static void fiq_debugger_work(struct work_struct *work)
+{
+       struct fiq_debugger_state *state;
+       char work_cmd[DEBUG_MAX];
+       char *cmd;
+       unsigned long flags;
+
+       state = container_of(work, struct fiq_debugger_state, work);
+
+       spin_lock_irqsave(&state->work_lock, flags);
+
+       strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+       state->work_cmd[0] = '\0';
+
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       cmd = work_cmd;
+       if (!strncmp(cmd, "reboot", 6)) {
+               cmd += 6;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd != '\0')
+                       kernel_restart(cmd);
+               else
+                       kernel_restart(NULL);
+       } else {
+               fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+                               work_cmd);
+       }
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+       if (!strcmp(cmd, "ps"))
+               fiq_debugger_do_ps(state);
+       if (!strcmp(cmd, "sysrq"))
+               fiq_debugger_do_sysrq(state, 'h');
+       if (!strncmp(cmd, "sysrq ", 6))
+               fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+       if (!strcmp(cmd, "kgdb"))
+               fiq_debugger_do_kgdb(state);
+#endif
+       if (!strncmp(cmd, "reboot", 6))
+               fiq_debugger_schedule_work(state, cmd);
+}
+
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+       fiq_debugger_printf(&state->output,
+                               "FIQ Debugger commands:\n"
+                               " pc            PC status\n"
+                               " regs          Register dump\n"
+                               " allregs       Extended Register dump\n"
+                               " bt            Stack trace\n"
+                               " reboot [<c>]  Reboot with command <c>\n"
+                               " reset [<c>]   Hard reset with command <c>\n"
+                               " irqs          Interupt status\n"
+                               " kmsg          Kernel log\n"
+                               " version       Kernel version\n");
+       fiq_debugger_printf(&state->output,
+                               " sleep         Allow sleep while in FIQ\n"
+                               " nosleep       Disable sleep while in FIQ\n"
+                               " console       Switch terminal to console\n"
+                               " cpu           Current CPU\n"
+                               " cpu <number>  Switch to CPU<number>\n");
+       fiq_debugger_printf(&state->output,
+                               " ps            Process list\n"
+                               " sysrq         sysrq options\n"
+                               " sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+       fiq_debugger_printf(&state->output,
+                               " kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+       struct fiq_debugger_state *state = info;
+       struct cpumask cpumask;
+
+       cpumask_clear(&cpumask);
+       cpumask_set_cpu(get_cpu(), &cpumask);
+
+       irq_set_affinity(state->uart_irq, &cpumask);
+}
+
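+/*
+ * In uart-irq mode, move the uart irq's affinity to the target cpu; in
+ * fiq mode nothing is rerouted and fiq_debugger_handle_uart_interrupt()
+ * filters on current_cpu instead.
+ */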
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+       if (!fiq_debugger_have_fiq(state))
+               smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+                               false);
+       state->current_cpu = cpu;
+}
+
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+                       const char *cmd, const struct pt_regs *regs,
+                       void *svc_sp)
+{
+       bool signal_helper = false;
+
+       if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+               fiq_debugger_help(state);
+       } else if (!strcmp(cmd, "pc")) {
+               fiq_debugger_dump_pc(&state->output, regs);
+       } else if (!strcmp(cmd, "regs")) {
+               fiq_debugger_dump_regs(&state->output, regs);
+       } else if (!strcmp(cmd, "allregs")) {
+               fiq_debugger_dump_allregs(&state->output, regs);
+       } else if (!strcmp(cmd, "bt")) {
+               fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+       } else if (!strncmp(cmd, "reset", 5)) {
+               cmd += 5;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd) {
+                       char tmp_cmd[32];
+                       strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+                       machine_restart(tmp_cmd);
+               } else {
+                       machine_restart(NULL);
+               }
+       } else if (!strcmp(cmd, "irqs")) {
+               fiq_debugger_dump_irqs(state);
+       } else if (!strcmp(cmd, "kmsg")) {
+               fiq_debugger_dump_kernel_log(state);
+       } else if (!strcmp(cmd, "version")) {
+               fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+       } else if (!strcmp(cmd, "sleep")) {
+               state->no_sleep = false;
+               fiq_debugger_printf(&state->output, "enabling sleep\n");
+       } else if (!strcmp(cmd, "nosleep")) {
+               state->no_sleep = true;
+               fiq_debugger_printf(&state->output, "disabling sleep\n");
+       } else if (!strcmp(cmd, "console")) {
+               fiq_debugger_printf(&state->output, "console mode\n");
+               fiq_debugger_uart_flush(state);
+               state->console_enable = true;
+       } else if (!strcmp(cmd, "cpu")) {
+               fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+       } else if (!strncmp(cmd, "cpu ", 4)) {
+               unsigned long cpu = 0;
+               if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+                       fiq_debugger_switch_cpu(state, cpu);
+               else
+                       fiq_debugger_printf(&state->output, "invalid cpu\n");
+               fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+       } else {
+               if (state->debug_busy) {
+                       fiq_debugger_printf(&state->output,
+                               "command processor busy. trying to abort.\n");
+                       state->debug_abort = -1;
+               } else {
+                       strcpy(state->debug_cmd, cmd);
+                       state->debug_busy = 1;
+               }
+
+               return true;
+       }
+       if (!state->console_enable)
+               fiq_debugger_prompt(state);
+
+       return signal_helper;
+}
+
+static void fiq_debugger_sleep_timer_expired(unsigned long data)
+{
+       struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->uart_enabled && !state->no_sleep) {
+               if (state->debug_enable && !state->console_enable) {
+                       state->debug_enable = false;
+                       fiq_debugger_printf_nfiq(state,
+                                       "suspending fiq debugger\n");
+               }
+               state->ignore_next_wakeup_irq = true;
+               fiq_debugger_uart_disable(state);
+               state->uart_enabled = false;
+               fiq_debugger_enable_wakeup_irq(state);
+       }
+       wake_unlock(&state->debugger_wake_lock);
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+               state->ignore_next_wakeup_irq = false;
+       } else if (!state->uart_enabled) {
+               wake_lock(&state->debugger_wake_lock);
+               fiq_debugger_uart_enable(state);
+               state->uart_enabled = true;
+               fiq_debugger_disable_wakeup_irq(state);
+               mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+       }
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (!state->no_sleep)
+               fiq_debugger_puts(state, "WAKEUP\n");
+       fiq_debugger_handle_wakeup(state);
+
+       return IRQ_HANDLED;
+}
+
+static
+void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       if (state->tty_port.ops) {
+               int i;
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               for (i = 0; i < count; i++) {
+                       int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
+                       if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+                               pr_warn("fiq tty failed to consume byte\n");
+               }
+               tty_flip_buffer_push(&state->tty_port);
+       }
+#endif
+}
+
+static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
+{
+       if (!state->no_sleep) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&state->sleep_timer_lock, flags);
+               wake_lock(&state->debugger_wake_lock);
+               mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+               spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+       }
+       fiq_debugger_handle_console_irq_context(state);
+       if (state->debug_busy) {
+               fiq_debugger_irq_exec(state, state->debug_cmd);
+               if (!state->console_enable)
+                       fiq_debugger_prompt(state);
+               state->debug_busy = 0;
+       }
+}
+
+static int fiq_debugger_getc(struct fiq_debugger_state *state)
+{
+       return state->pdata->uart_getc(state->pdev);
+}
+
+static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
+                       int this_cpu, const struct pt_regs *regs, void *svc_sp)
+{
+       int c;
+       static int last_c;
+       int count = 0;
+       bool signal_helper = false;
+
+       if (this_cpu != state->current_cpu) {
+               if (state->in_fiq)
+                       return false;
+
+               if (atomic_inc_return(&state->unhandled_fiq_count) !=
+                                       MAX_UNHANDLED_FIQ_COUNT)
+                       return false;
+
+               fiq_debugger_printf(&state->output,
+                       "fiq_debugger: cpu %d not responding, reverting to cpu %d\n",
+                       state->current_cpu, this_cpu);
+
+               atomic_set(&state->unhandled_fiq_count, 0);
+               fiq_debugger_switch_cpu(state, this_cpu);
+               return false;
+       }
+
+       state->in_fiq = true;
+
+       while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+               count++;
+               if (!state->debug_enable) {
+                       if ((c == 13) || (c == 10)) {
+                               state->debug_enable = true;
+                               state->debug_count = 0;
+                               fiq_debugger_prompt(state);
+                       }
+               } else if (c == FIQ_DEBUGGER_BREAK) {
+                       state->console_enable = false;
+                       fiq_debugger_puts(state, "fiq debugger mode\n");
+                       state->debug_count = 0;
+                       fiq_debugger_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+               } else if (state->console_enable && state->tty_rbuf) {
+                       fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+                       signal_helper = true;
+#endif
+               } else if ((c >= ' ') && (c < 127)) {
+                       if (state->debug_count < (DEBUG_MAX - 1)) {
+                               state->debug_buf[state->debug_count++] = c;
+                               fiq_debugger_putc(state, c);
+                       }
+               } else if ((c == 8) || (c == 127)) {
+                       if (state->debug_count > 0) {
+                               state->debug_count--;
+                               fiq_debugger_putc(state, 8);
+                               fiq_debugger_putc(state, ' ');
+                               fiq_debugger_putc(state, 8);
+                       }
+               } else if ((c == 13) || (c == 10)) {
+                       if (c == '\r' || (c == '\n' && last_c != '\r')) {
+                               fiq_debugger_putc(state, '\r');
+                               fiq_debugger_putc(state, '\n');
+                       }
+                       if (state->debug_count) {
+                               state->debug_buf[state->debug_count] = 0;
+                               state->debug_count = 0;
+                               signal_helper |=
+                                       fiq_debugger_fiq_exec(state,
+                                                       state->debug_buf,
+                                                       regs, svc_sp);
+                       } else {
+                               fiq_debugger_prompt(state);
+                       }
+               }
+               last_c = c;
+       }
+       if (!state->console_enable)
+               fiq_debugger_uart_flush(state);
+       if (state->pdata->fiq_ack)
+               state->pdata->fiq_ack(state->pdev, state->fiq);
+
+       /* poke sleep timer if necessary */
+       if (state->debug_enable && !state->no_sleep)
+               signal_helper = true;
+
+       atomic_set(&state->unhandled_fiq_count, 0);
+       state->in_fiq = false;
+
+       return signal_helper;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_fiq(struct fiq_glue_handler *h,
+               const struct pt_regs *regs, void *svc_sp)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+       bool need_irq;
+
+       need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
+                       svc_sp);
+       if (need_irq)
+               fiq_debugger_force_irq(state);
+}
+#endif
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+       bool not_done;
+
+       fiq_debugger_handle_wakeup(state);
+
+       /* handle the debugger irq in regular context */
+       not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
+                                             get_irq_regs(),
+                                             current_thread_info());
+       if (not_done)
+               fiq_debugger_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (state->pdata->force_irq_ack)
+               state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+       fiq_debugger_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_resume(struct fiq_glue_handler *h)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       if (state->pdata->uart_resume)
+               state->pdata->uart_resume(state->pdev);
+}
+#endif
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *fiq_debugger_console_device(struct console *co, int *index)
+{
+       *index = co->index;
+       return fiq_tty_driver;
+}
+
+static void fiq_debugger_console_write(struct console *co,
+                               const char *s, unsigned int count)
+{
+       struct fiq_debugger_state *state;
+       unsigned long flags;
+
+       state = container_of(co, struct fiq_debugger_state, console);
+
+       if (!state->console_enable && !state->syslog_dumping)
+               return;
+
+       fiq_debugger_uart_enable(state);
+       spin_lock_irqsave(&state->console_lock, flags);
+       while (count--) {
+               if (*s == '\n')
+                       fiq_debugger_putc(state, '\r');
+               fiq_debugger_putc(state, *s++);
+       }
+       fiq_debugger_uart_flush(state);
+       spin_unlock_irqrestore(&state->console_lock, flags);
+       fiq_debugger_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+       .name = "ttyFIQ",
+       .device = fiq_debugger_console_device,
+       .write = fiq_debugger_console_write,
+       .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       return tty_port_open(&state->tty_port, tty, filp);
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+       tty_port_close(tty->port, tty, filp);
+}
+
+int  fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+       int i;
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       if (!state->console_enable)
+               return count;
+
+       fiq_debugger_uart_enable(state);
+       spin_lock_irq(&state->console_lock);
+       for (i = 0; i < count; i++)
+               fiq_debugger_putc(state, *buf++);
+       spin_unlock_irq(&state->console_lock);
+       fiq_debugger_uart_disable(state);
+
+       return count;
+}
+
+int  fiq_tty_write_room(struct tty_struct *tty)
+{
+       return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+       return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       int c = NO_POLL_CHAR;
+
+       fiq_debugger_uart_enable(state);
+       if (fiq_debugger_have_fiq(state)) {
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               if (count > 0) {
+                       c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+               }
+       } else {
+               c = fiq_debugger_getc(state);
+               if (c == FIQ_DEBUGGER_NO_CHAR)
+                       c = NO_POLL_CHAR;
+       }
+       fiq_debugger_uart_disable(state);
+
+       return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       fiq_debugger_uart_enable(state);
+       fiq_debugger_putc(state, ch);
+       fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+       .write = fiq_tty_write,
+       .write_room = fiq_tty_write_room,
+       .open = fiq_tty_open,
+       .close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_init = fiq_tty_poll_init,
+       .poll_get_char = fiq_tty_poll_get_char,
+       .poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+       int ret;
+       struct fiq_debugger_state **states = NULL;
+
+       states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+       if (!states) {
+               pr_err("Failed to allocate fiq debugger state structres\n");
+               return -ENOMEM;
+       }
+
+       fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+       if (!fiq_tty_driver) {
+               pr_err("Failed to allocate fiq debugger tty\n");
+               ret = -ENOMEM;
+               goto err_free_state;
+       }
+
+       fiq_tty_driver->owner           = THIS_MODULE;
+       fiq_tty_driver->driver_name     = "fiq-debugger";
+       fiq_tty_driver->name            = "ttyFIQ";
+       fiq_tty_driver->type            = TTY_DRIVER_TYPE_SERIAL;
+       fiq_tty_driver->subtype         = SERIAL_TYPE_NORMAL;
+       fiq_tty_driver->init_termios    = tty_std_termios;
+       fiq_tty_driver->flags           = TTY_DRIVER_REAL_RAW |
+                                         TTY_DRIVER_DYNAMIC_DEV;
+       fiq_tty_driver->driver_state    = states;
+
+       fiq_tty_driver->init_termios.c_cflag =
+                                       B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+       fiq_tty_driver->init_termios.c_ispeed = 115200;
+       fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+       tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+       ret = tty_register_driver(fiq_tty_driver);
+       if (ret) {
+               pr_err("Failed to register fiq tty: %d\n", ret);
+               goto err_free_tty;
+       }
+
+       pr_info("Registered FIQ tty driver\n");
+       return 0;
+
+err_free_tty:
+       put_tty_driver(fiq_tty_driver);
+       fiq_tty_driver = NULL;
+err_free_state:
+       kfree(states);
+       return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+       int ret;
+       struct device *tty_dev;
+       struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+       states[state->pdev->id] = state;
+
+       state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+       if (!state->tty_rbuf) {
+               pr_err("Failed to allocate fiq debugger ringbuf\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       tty_port_init(&state->tty_port);
+       state->tty_port.ops = &fiq_tty_port_ops;
+
+       tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+                                          state->pdev->id, &state->pdev->dev);
+       if (IS_ERR(tty_dev)) {
+               pr_err("Failed to register fiq debugger tty device\n");
+               ret = PTR_ERR(tty_dev);
+               goto err;
+       }
+
+       device_set_wakeup_capable(tty_dev, 1);
+
+       pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+       return 0;
+
+err:
+       fiq_debugger_ringbuf_free(state->tty_rbuf);
+       state->tty_rbuf = NULL;
+       return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_suspend)
+               return state->pdata->uart_dev_suspend(pdev);
+       return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_resume)
+               return state->pdata->uart_dev_resume(pdev);
+       return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+       struct fiq_debugger_state *state;
+       int fiq;
+       int uart_irq;
+
+       if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+               return -EINVAL;
+
+       if (!pdata->uart_getc || !pdata->uart_putc)
+               return -EINVAL;
+       if ((pdata->uart_enable && !pdata->uart_disable) ||
+           (!pdata->uart_enable && pdata->uart_disable))
+               return -EINVAL;
+
+       fiq = platform_get_irq_byname(pdev, "fiq");
+       uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+       /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+        * is required */
+       if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+               return -EINVAL;
+       if (fiq >= 0 && !pdata->fiq_enable)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+       state->output.printf = fiq_debugger_printf;
+       setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+                   (unsigned long)state);
+       state->pdata = pdata;
+       state->pdev = pdev;
+       state->no_sleep = initial_no_sleep;
+       state->debug_enable = initial_debug_enable;
+       state->console_enable = initial_console_enable;
+
+       state->fiq = fiq;
+       state->uart_irq = uart_irq;
+       state->signal_irq = platform_get_irq_byname(pdev, "signal");
+       state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+       INIT_WORK(&state->work, fiq_debugger_work);
+       spin_lock_init(&state->work_lock);
+
+       platform_set_drvdata(pdev, state);
+
+       spin_lock_init(&state->sleep_timer_lock);
+
+       if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+               state->no_sleep = true;
+       state->ignore_next_wakeup_irq = !state->no_sleep;
+
+       wake_lock_init(&state->debugger_wake_lock,
+                       WAKE_LOCK_SUSPEND, "serial-debug");
+
+       state->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(state->clk))
+               state->clk = NULL;
+
+       /* do not call pdata->uart_enable here since uart_init may still
+        * need to do some initialization before uart_enable can work.
+        * So, only try to manage the clock during init.
+        */
+       if (state->clk)
+               clk_enable(state->clk);
+
+       if (pdata->uart_init) {
+               ret = pdata->uart_init(pdev);
+               if (ret)
+                       goto err_uart_init;
+       }
+
+       fiq_debugger_printf_nfiq(state,
+                               "<hit enter %sto activate fiq debugger>\n",
+                               state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+       if (fiq_debugger_have_fiq(state)) {
+               state->handler.fiq = fiq_debugger_fiq;
+               state->handler.resume = fiq_debugger_resume;
+               ret = fiq_glue_register_handler(&state->handler);
+               if (ret) {
+                       pr_err("%s: could not install fiq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               pdata->fiq_enable(pdev, state->fiq, 1);
+       } else
+#endif
+       {
+               ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+                                 IRQF_NO_SUSPEND, "debug", state);
+               if (ret) {
+                       pr_err("%s: could not install irq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               /* for irq-only mode, we want this irq to wake us up, if it
+                * can.
+                */
+               enable_irq_wake(state->uart_irq);
+       }
+
+       if (state->clk)
+               clk_disable(state->clk);
+
+       if (state->signal_irq >= 0) {
+               ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
+                         IRQF_TRIGGER_RISING, "debug-signal", state);
+               if (ret)
+                       pr_err("serial_debugger: could not install signal_irq");
+       }
+
+       if (state->wakeup_irq >= 0) {
+               ret = request_irq(state->wakeup_irq,
+                                 fiq_debugger_wakeup_irq_handler,
+                                 IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+                                 "debug-wakeup", state);
+               if (ret) {
+                       pr_err("serial_debugger: "
+                               "could not install wakeup irq\n");
+                       state->wakeup_irq = -1;
+               } else {
+                       ret = enable_irq_wake(state->wakeup_irq);
+                       if (ret) {
+                               pr_err("serial_debugger: "
+                                       "could not enable wakeup\n");
+                               state->wakeup_irq_no_set_wake = true;
+                       }
+               }
+       }
+       if (state->no_sleep)
+               fiq_debugger_handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       spin_lock_init(&state->console_lock);
+       state->console = fiq_debugger_console;
+       state->console.index = pdev->id;
+       if (!console_set_on_cmdline)
+               add_preferred_console(state->console.name,
+                       state->console.index, NULL);
+       register_console(&state->console);
+       fiq_debugger_tty_init_one(state);
+#endif
+       return 0;
+
+err_register_irq:
+       if (pdata->uart_free)
+               pdata->uart_free(pdev);
+err_uart_init:
+       if (state->clk) {
+               clk_disable(state->clk);
+               clk_put(state->clk);
+       }
+       wake_lock_destroy(&state->debugger_wake_lock);
+       platform_set_drvdata(pdev, NULL);
+       kfree(state);
+       return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+       .suspend        = fiq_debugger_dev_suspend,
+       .resume         = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+       .probe  = fiq_debugger_probe,
+       .driver = {
+               .name   = "fiq_debugger",
+               .pm     = &fiq_debugger_dev_pm_ops,
+       },
+};
+
+static int __init fiq_debugger_init(void)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       fiq_debugger_tty_init();
+#endif
+       return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
new file mode 100644 (file)
index 0000000..c9ec4f8
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
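+/* magic value a platform uart_getc() returns to report a BREAK condition;
+ * the debugger uses it to drop out of console mode */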
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME      "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME   "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME   "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume:       used to restore uart state right before enabling
+ *                     the fiq.
+ * @uart_enable:       Do the work necessary to communicate with the uart
+ *                     hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:      Do the work necessary to disable the uart hw
+ *                     (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:  called during PM suspend, generally not needed
+ *                     for real fiq mode debugger.
+ * @uart_dev_resume:   called during PM resume, generally not needed
+ *                     for real fiq mode debugger.
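+ * @uart_getc:         return one received character, or
+ *                     FIQ_DEBUGGER_NO_CHAR when nothing is pending.
+ * @uart_putc:         write one character to the uart.
+ * @fiq_ack:           acknowledge the fiq at the end of the handler,
+ *                     where the platform requires it.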
+ */
+struct fiq_debugger_pdata {
+       int (*uart_init)(struct platform_device *pdev);
+       void (*uart_free)(struct platform_device *pdev);
+       int (*uart_resume)(struct platform_device *pdev);
+       int (*uart_getc)(struct platform_device *pdev);
+       void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+       void (*uart_flush)(struct platform_device *pdev);
+       void (*uart_enable)(struct platform_device *pdev);
+       void (*uart_disable)(struct platform_device *pdev);
+
+       int (*uart_dev_suspend)(struct platform_device *pdev);
+       int (*uart_dev_resume)(struct platform_device *pdev);
+
+       void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+                                                               bool enable);
+       void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+       void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+       void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
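Board files hand these hooks to the driver through platform data. Below is a
minimal polled-I/O sketch: the UART base, register offsets and status bits are
hypothetical, the IRQ/FIQ resources (named with the *_IRQ_NAME strings above)
are omitted for brevity, and a real board would normally also wire up clock
handling through uart_enable/uart_disable. The device name must match the
"fiq_debugger" platform driver registered in fiq_debugger.c, and the id
becomes the console index.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/processor.h>

#include "fiq_debugger.h"

#define DEMO_UART_BASE  0x10009000      /* hypothetical physical base */
#define DEMO_UART_DR    0x00            /* hypothetical data register */
#define DEMO_UART_FR    0x18            /* hypothetical flag register */
#define DEMO_RX_EMPTY   (1 << 4)
#define DEMO_TX_FULL    (1 << 5)

static void __iomem *demo_uart;

static int demo_uart_getc(struct platform_device *pdev)
{
        if (readl(demo_uart + DEMO_UART_FR) & DEMO_RX_EMPTY)
                return FIQ_DEBUGGER_NO_CHAR;
        return readl(demo_uart + DEMO_UART_DR) & 0xff;
}

static void demo_uart_putc(struct platform_device *pdev, unsigned int c)
{
        while (readl(demo_uart + DEMO_UART_FR) & DEMO_TX_FULL)
                cpu_relax();
        writel(c, demo_uart + DEMO_UART_DR);
}

static struct fiq_debugger_pdata demo_pdata = {
        .uart_getc      = demo_uart_getc,
        .uart_putc      = demo_uart_putc,
};

static struct platform_device demo_device = {
        .name   = "fiq_debugger",       /* must match the driver name */
        .id     = 0,                    /* becomes the console index */
        .dev    = { .platform_data = &demo_pdata },
};

static int __init demo_fiq_debugger_init(void)
{
        demo_uart = ioremap(DEMO_UART_BASE, SZ_4K);
        if (!demo_uart)
                return -ENOMEM;
        return platform_device_register(&demo_device);
}
device_initcall(demo_fiq_debugger_init);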
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
new file mode 100644 (file)
index 0000000..8b3e013
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(unsigned cpsr)
+{
+       switch (cpsr & MODE_MASK) {
+       case USR_MODE: return "USR";
+       case FIQ_MODE: return "FIQ";
+       case IRQ_MODE: return "IRQ";
+       case SVC_MODE: return "SVC";
+       case ABT_MODE: return "ABT";
+       case UND_MODE: return "UND";
+       case SYSTEM_MODE: return "SYS";
+       default: return "???";
+       }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " pc %08x cpsr %08x mode %s\n",
+               regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output,
+                       " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+       output->printf(output,
+                       " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+       output->printf(output,
+                       " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+                       regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
+                       mode_name(regs->ARM_cpsr));
+       output->printf(output,
+                       " ip %08x  sp %08x  lr %08x  pc %08x cpsr %08x\n",
+                       regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
+                       regs->ARM_cpsr);
+}
+
+struct mode_regs {
+       unsigned long sp_svc;
+       unsigned long lr_svc;
+       unsigned long spsr_svc;
+
+       unsigned long sp_abt;
+       unsigned long lr_abt;
+       unsigned long spsr_abt;
+
+       unsigned long sp_und;
+       unsigned long lr_und;
+       unsigned long spsr_und;
+
+       unsigned long sp_irq;
+       unsigned long lr_irq;
+       unsigned long spsr_irq;
+
+       unsigned long r8_fiq;
+       unsigned long r9_fiq;
+       unsigned long r10_fiq;
+       unsigned long r11_fiq;
+       unsigned long r12_fiq;
+       unsigned long sp_fiq;
+       unsigned long lr_fiq;
+       unsigned long spsr_fiq;
+};
+
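+/*
+ * Switch through each privileged ARM mode with IRQ and FIQ masked,
+ * storing the banked sp/lr/spsr of each (plus r8-r12 for FIQ mode)
+ * into *regs, then return to the original mode.  Must be __naked:
+ * the compiler cannot be allowed to touch the stack while we are
+ * borrowing another mode's stack pointer.
+ */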
+static void __naked get_mode_regs(struct mode_regs *regs)
+{
+       asm volatile (
+       "mrs    r1, cpsr\n"
+       "msr    cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r8 - r14}\n"
+       "mrs    r2, spsr\n"
+       "stmia  r0!, {r2}\n"
+       "msr    cpsr_c, r1\n"
+       "bx     lr\n");
+}
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       struct mode_regs mode_regs;
+       unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+       fiq_debugger_dump_regs(output, regs);
+       get_mode_regs(&mode_regs);
+
+       output->printf(output,
+                       "%csvc: sp %08x  lr %08x  spsr %08x\n",
+                       mode == SVC_MODE ? '*' : ' ',
+                       mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+       output->printf(output,
+                       "%cabt: sp %08x  lr %08x  spsr %08x\n",
+                       mode == ABT_MODE ? '*' : ' ',
+                       mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+       output->printf(output,
+                       "%cund: sp %08x  lr %08x  spsr %08x\n",
+                       mode == UND_MODE ? '*' : ' ',
+                       mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+       output->printf(output,
+                       "%cirq: sp %08x  lr %08x  spsr %08x\n",
+                       mode == IRQ_MODE ? '*' : ' ',
+                       mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+       output->printf(output,
+                       "%cfiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  r12 %08x\n",
+                       mode == FIQ_MODE ? '*' : ' ',
+                       mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+                       mode_regs.r11_fiq, mode_regs.r12_fiq);
+       output->printf(output,
+                       " fiq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_output *output;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               sts->output->printf(sts->output,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       frame->pc, frame->pc, frame->lr, frame->lr,
+                       frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       sts->output->printf(sts->output, "  ...\n");
+
+       return sts->depth == 0;
+}
+
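+/*
+ * The three words an APCS function prologue saves at the top of its
+ * stack frame; user_backtrace() below walks these to produce a crude
+ * userspace call trace.
+ */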
+struct frame_tail {
+       struct frame_tail *fp;
+       unsigned long sp;
+       unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
+                                       struct frame_tail *tail)
+{
+       struct frame_tail buftail[2];
+
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+               output->printf(output, "  invalid frame pointer %p\n",
+                               tail);
+               return NULL;
+       }
+       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+               output->printf(output,
+                       "  failed to copy frame pointer %p\n", tail);
+               return NULL;
+       }
+
+       output->printf(output, "  %p\n", buftail[0].lr);
+
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (tail >= buftail[0].fp)
+               return NULL;
+
+       return buftail[0].fp-1;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+       struct frame_tail *tail;
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.output = output;
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               output->printf(output, "current NULL\n");
+       else
+               output->printf(output, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       fiq_debugger_dump_regs(output, regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->ARM_fp;
+               frame.sp = regs->ARM_sp;
+               frame.lr = regs->ARM_lr;
+               frame.pc = regs->ARM_pc;
+               output->printf(output,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+                       regs->ARM_sp, regs->ARM_fp);
+               walk_stackframe(&frame, report_trace, &sts);
+               return;
+       }
+
+       tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+       while (depth-- && tail && !((unsigned long) tail & 3))
+               tail = user_backtrace(output, tail);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
new file mode 100644 (file)
index 0000000..99c6584
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(const struct pt_regs *regs)
+{
+       if (compat_user_mode(regs)) {
+               return "USR";
+       } else {
+               switch (processor_mode(regs)) {
+               case PSR_MODE_EL0t: return "EL0t";
+               case PSR_MODE_EL1t: return "EL1t";
+               case PSR_MODE_EL1h: return "EL1h";
+               case PSR_MODE_EL2t: return "EL2t";
+               case PSR_MODE_EL2h: return "EL2h";
+               default: return "???";
+               }
+       }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
+               regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs->compat_usr(0), regs->compat_usr(1),
+                       regs->compat_usr(2), regs->compat_usr(3));
+       output->printf(output, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs->compat_usr(4), regs->compat_usr(5),
+                       regs->compat_usr(6), regs->compat_usr(7));
+       output->printf(output, " r8 %08x  r9 %08x r10 %08x r11 %08x\n",
+                       regs->compat_usr(8), regs->compat_usr(9),
+                       regs->compat_usr(10), regs->compat_usr(11));
+       output->printf(output, " ip %08x  sp %08x  lr %08x  pc %08x\n",
+                       regs->compat_usr(12), regs->compat_sp,
+                       regs->compat_lr, regs->pc);
+       output->printf(output, " cpsr %08x (%s)\n",
+                       regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       output->printf(output, "  x0 %016lx   x1 %016lx\n",
+                       regs->regs[0], regs->regs[1]);
+       output->printf(output, "  x2 %016lx   x3 %016lx\n",
+                       regs->regs[2], regs->regs[3]);
+       output->printf(output, "  x4 %016lx   x5 %016lx\n",
+                       regs->regs[4], regs->regs[5]);
+       output->printf(output, "  x6 %016lx   x7 %016lx\n",
+                       regs->regs[6], regs->regs[7]);
+       output->printf(output, "  x8 %016lx   x9 %016lx\n",
+                       regs->regs[8], regs->regs[9]);
+       output->printf(output, " x10 %016lx  x11 %016lx\n",
+                       regs->regs[10], regs->regs[11]);
+       output->printf(output, " x12 %016lx  x13 %016lx\n",
+                       regs->regs[12], regs->regs[13]);
+       output->printf(output, " x14 %016lx  x15 %016lx\n",
+                       regs->regs[14], regs->regs[15]);
+       output->printf(output, " x16 %016lx  x17 %016lx\n",
+                       regs->regs[16], regs->regs[17]);
+       output->printf(output, " x18 %016lx  x19 %016lx\n",
+                       regs->regs[18], regs->regs[19]);
+       output->printf(output, " x20 %016lx  x21 %016lx\n",
+                       regs->regs[20], regs->regs[21]);
+       output->printf(output, " x22 %016lx  x23 %016lx\n",
+                       regs->regs[22], regs->regs[23]);
+       output->printf(output, " x24 %016lx  x25 %016lx\n",
+                       regs->regs[24], regs->regs[25]);
+       output->printf(output, " x26 %016lx  x27 %016lx\n",
+                       regs->regs[26], regs->regs[27]);
+       output->printf(output, " x28 %016lx  x29 %016lx\n",
+                       regs->regs[28], regs->regs[29]);
+       output->printf(output, " x30 %016lx   sp %016lx\n",
+                       regs->regs[30], regs->sp);
+       output->printf(output, "  pc %016lx cpsr %08x (%s)\n",
+                       regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       if (compat_user_mode(regs))
+               fiq_debugger_dump_regs_aarch32(output, regs);
+       else
+               fiq_debugger_dump_regs_aarch64(output, regs);
+}
+
+#define READ_SPECIAL_REG(x) ({ \
+       u64 val; \
+       asm volatile ("mrs %0, " # x : "=r"(val)); \
+       val; \
+})
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs)
+{
+       u32 current_el = READ_SPECIAL_REG(CurrentEl);
+       bool in_el2 = (current_el & PSR_MODE_MASK) >= PSR_MODE_EL2t;
+
+       fiq_debugger_dump_regs(output, regs);
+
+       output->printf(output, " sp_el0   %016lx\n",
+                       READ_SPECIAL_REG(sp_el0));
+
+       if (in_el2)
+               output->printf(output, " sp_el1   %016lx\n",
+                               READ_SPECIAL_REG(sp_el1));
+
+       output->printf(output, " elr_el1  %016lx\n",
+                       READ_SPECIAL_REG(elr_el1));
+
+       output->printf(output, " spsr_el1 %08lx\n",
+                       READ_SPECIAL_REG(spsr_el1));
+
+       if (in_el2) {
+               output->printf(output, " spsr_irq %08lx\n",
+                               READ_SPECIAL_REG(spsr_irq));
+               output->printf(output, " spsr_abt %08lx\n",
+                               READ_SPECIAL_REG(spsr_abt));
+               output->printf(output, " spsr_und %08lx\n",
+                               READ_SPECIAL_REG(spsr_und));
+               output->printf(output, " spsr_fiq %08lx\n",
+                               READ_SPECIAL_REG(spsr_fiq));
+               output->printf(output, " elr_el2  %016lx\n",
+                               READ_SPECIAL_REG(elr_el2));
+               output->printf(output, " spsr_el2 %08lx\n",
+                               READ_SPECIAL_REG(spsr_el2));
+       }
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_output *output;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               sts->output->printf(sts->output, "%pF:\n", frame->pc);
+               sts->output->printf(sts->output,
+                               "  pc %016lx   sp %016lx   fp %016lx\n",
+                               frame->pc, frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       sts->output->printf(sts->output, "  ...\n");
+
+       return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.output = output;
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               output->printf(output, "current NULL\n");
+       else
+               output->printf(output, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       fiq_debugger_dump_regs(output, regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->regs[29];
+               frame.sp = regs->sp;
+               frame.pc = regs->pc;
+               output->printf(output, "\n");
+               walk_stackframe(&frame, report_trace, &sts);
+       }
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644 (file)
index 0000000..d5d051f
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_PRIV_H_
+#define _FIQ_DEBUGGER_PRIV_H_
+
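+/*
+ * thread_info lives at the base of each kernel stack, so masking a
+ * stack pointer down to THREAD_SIZE alignment recovers it.
+ */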
+#define THREAD_INFO(sp) ((struct thread_info *) \
+               ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_output {
+       void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
+};
+
+struct pt_regs;
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+               const struct pt_regs *regs);
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+               const struct pt_regs *regs, unsigned int depth, void *ssp);
+
+#endif
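The arch dump helpers above are written against this single-method output
contract. A minimal sketch of an implementation backed by printk; this assumes
the caller runs in ordinary irq/process context (a true FIQ can interrupt a
printk lock holder, which is why fiq_watchdog.c below writes to ramoops
instead):

#include <linux/kernel.h>

#include "fiq_debugger_priv.h"

static void demo_output_printf(struct fiq_debugger_output *output,
                               const char *fmt, ...)
{
        char buf[256];
        va_list ap;

        /* format into a bounce buffer, then emit it in one piece */
        va_start(ap, fmt);
        vscnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        printk(KERN_INFO "%s", buf);
}

static struct fiq_debugger_output demo_output = {
        .printf = demo_output_printf,
};

/* e.g. from an exception hook: fiq_debugger_dump_allregs(&demo_output, regs); */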
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
new file mode 100644 (file)
index 0000000..10c3c5d
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+       int len;
+       int head;
+       int tail;
+       u8 buf[];
+};
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+       struct fiq_debugger_ringbuf *rbuf;
+
+       rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+       if (rbuf == NULL)
+               return NULL;
+
+       rbuf->len = len;
+       rbuf->head = 0;
+       rbuf->tail = 0;
+       smp_mb();
+
+       return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+       kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+       int level = rbuf->head - rbuf->tail;
+
+       if (level < 0)
+               level = rbuf->len + level;
+
+       return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+       return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+       return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+       count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+       rbuf->tail = (rbuf->tail + count) % rbuf->len;
+       smp_mb();
+
+       return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+       if (fiq_debugger_ringbuf_room(rbuf) == 0)
+               return 0;
+
+       rbuf->buf[rbuf->head] = datum;
+       smp_mb();
+       rbuf->head = (rbuf->head + 1) % rbuf->len;
+       smp_mb();
+
+       return 1;
+}
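This ring buffer is safe without a lock only under a single-producer,
single-consumer discipline: the writer alone advances head, the reader alone
advances tail, and the smp_mb() calls order the data accesses against the
index updates. A usage sketch under that assumption (the debugger's FIQ
handler as producer, its irq-context work as consumer):

#include "fiq_debugger_ringbuf.h"

/* producer side, e.g. a FIQ handler draining the uart rx fifo */
void demo_rx(struct fiq_debugger_ringbuf *rbuf, u8 c)
{
        /* push returns 0 when the buffer is full; the caller decides
         * whether to drop the byte or report the overflow */
        fiq_debugger_ringbuf_push(rbuf, c);
}

/* consumer side, e.g. irq-context work feeding the tty layer */
void demo_drain(struct fiq_debugger_ringbuf *rbuf)
{
        while (fiq_debugger_ringbuf_level(rbuf)) {
                u8 c = fiq_debugger_ringbuf_peek(rbuf, 0);

                /* hand c on (e.g. tty_insert_flip_char()), then retire it */
                fiq_debugger_ringbuf_consume(rbuf, 1);
        }
}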
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
new file mode 100644 (file)
index 0000000..194b541
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/pstore_ram.h>
+
+#include "fiq_watchdog.h"
+#include "fiq_debugger_priv.h"
+
+static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
+
+static void fiq_watchdog_printf(struct fiq_debugger_output *output,
+                               const char *fmt, ...)
+{
+       char buf[256];
+       va_list ap;
+       int len;
+
+       va_start(ap, fmt);
+       len = vscnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       ramoops_console_write_buf(buf, len);
+}
+
+struct fiq_debugger_output fiq_watchdog_output = {
+       .printf = fiq_watchdog_printf,
+};
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
+{
+       char msg[24];
+       int len;
+
+       raw_spin_lock(&fiq_watchdog_lock);
+
+       len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
+                       THREAD_INFO(svc_sp)->cpu);
+       ramoops_console_write_buf(msg, len);
+
+       fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
+
+       raw_spin_unlock(&fiq_watchdog_lock);
+}
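A platform watchdog driver is expected to call fiq_watchdog_triggered() from
its bark-FIQ path with the interrupted pt_regs and the SVC stack pointer;
everything printed lands in the ramoops buffer and so survives the reset that
follows. A sketch (how the FIQ is vectored to this hook is a platform
assumption, not part of this patch):

#include <linux/ptrace.h>

#include "fiq_watchdog.h"

/* hypothetical hook invoked from the platform's watchdog FIQ vector */
static void demo_wdt_bark_fiq(struct pt_regs *regs, void *svc_sp)
{
        /* dump the cpu number, registers and a 100-frame stacktrace
         * into ramoops before the watchdog bite resets the system */
        fiq_watchdog_triggered(regs, svc_sp);
}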
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
new file mode 100644 (file)
index 0000000..c6b507f
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_WATCHDOG_H_
+#define _FIQ_WATCHDOG_H_
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
+
+#endif
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644 (file)
index 0000000..0f8fec1
--- /dev/null
@@ -0,0 +1,35 @@
+menuconfig ION
+       bool "Ion Memory Manager"
+       depends on HAVE_MEMBLOCK
+       select GENERIC_ALLOCATOR
+       select DMA_SHARED_BUFFER
+       ---help---
+         Choose this option to enable the ION Memory Manager,
+         used by Android to efficiently allocate buffers
+         from userspace that can be shared between drivers.
+         If you're not using Android, it's probably safe to
+         say N here.
+
+config ION_TEST
+       tristate "Ion Test Device"
+       depends on ION
+       help
+         Choose this option to create a device that can be used to test the
+         kernel and device side ION functions.
+
+config ION_DUMMY
+       bool "Dummy Ion driver"
+       depends on ION
+       help
+         Provides a dummy ION driver that registers the
+         /dev/ion device and some basic heaps. This can
+         be used for testing the ION infrastructure if
+         one doesn't have access to hardware drivers that
+         use ION.
+
+config ION_TEGRA
+       tristate "Ion for Tegra"
+       depends on ARCH_TEGRA && ION
+       help
+         Choose this option if you wish to use ion on an NVIDIA Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644 (file)
index 0000000..b56fd2b
--- /dev/null
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+                       ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644 (file)
index 0000000..a5e8c40
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+       compat_size_t len;
+       compat_size_t align;
+       compat_uint_t heap_id_mask;
+       compat_uint_t flags;
+       compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+       compat_uint_t cmd;
+       compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+       compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC   _IOWR(ION_IOC_MAGIC, 0, \
+                                     struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE    _IOWR(ION_IOC_MAGIC, 1, \
+                                     struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM  _IOWR(ION_IOC_MAGIC, 6, \
+                                     struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+                       struct compat_ion_allocation_data __user *data32,
+                       struct ion_allocation_data __user *data)
+{
+       compat_size_t s;
+       compat_uint_t u;
+       compat_int_t i;
+       int err;
+
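+       /*
+        * get_user()/put_user() return 0 or -EFAULT, so OR-ing the
+        * results lets every field be copied unconditionally and any
+        * fault be reported with a single check at the end.
+        */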
+       err = get_user(s, &data32->len);
+       err |= put_user(s, &data->len);
+       err |= get_user(s, &data32->align);
+       err |= put_user(s, &data->align);
+       err |= get_user(u, &data32->heap_id_mask);
+       err |= put_user(u, &data->heap_id_mask);
+       err |= get_user(u, &data32->flags);
+       err |= put_user(u, &data->flags);
+       err |= get_user(i, &data32->handle);
+       err |= put_user(i, &data->handle);
+
+       return err;
+}
+
+static int compat_get_ion_handle_data(
+                       struct compat_ion_handle_data __user *data32,
+                       struct ion_handle_data __user *data)
+{
+       compat_int_t i;
+       int err;
+
+       err = get_user(i, &data32->handle);
+       err |= put_user(i, &data->handle);
+
+       return err;
+}
+
+static int compat_put_ion_allocation_data(
+                       struct compat_ion_allocation_data __user *data32,
+                       struct ion_allocation_data __user *data)
+{
+       compat_size_t s;
+       compat_uint_t u;
+       compat_int_t i;
+       int err;
+
+       err = get_user(s, &data->len);
+       err |= put_user(s, &data32->len);
+       err |= get_user(s, &data->align);
+       err |= put_user(s, &data32->align);
+       err |= get_user(u, &data->heap_id_mask);
+       err |= put_user(u, &data32->heap_id_mask);
+       err |= get_user(u, &data->flags);
+       err |= put_user(u, &data32->flags);
+       err |= get_user(i, &data->handle);
+       err |= put_user(i, &data32->handle);
+
+       return err;
+}
+
+static int compat_get_ion_custom_data(
+                       struct compat_ion_custom_data __user *data32,
+                       struct ion_custom_data __user *data)
+{
+       compat_uint_t cmd;
+       compat_ulong_t arg;
+       int err;
+
+       err = get_user(cmd, &data32->cmd);
+       err |= put_user(cmd, &data->cmd);
+       err |= get_user(arg, &data32->arg);
+       err |= put_user(arg, &data->arg);
+
+       return err;
+}
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       long ret;
+
+       if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+               return -ENOTTY;
+
+       switch (cmd) {
+       case COMPAT_ION_IOC_ALLOC:
+       {
+               struct compat_ion_allocation_data __user *data32;
+               struct ion_allocation_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_allocation_data(data32, data);
+               if (err)
+                       return err;
+               ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+                                                       (unsigned long)data);
+               err = compat_put_ion_allocation_data(data32, data);
+               return ret ? ret : err;
+       }
+       case COMPAT_ION_IOC_FREE:
+       {
+               struct compat_ion_handle_data __user *data32;
+               struct ion_handle_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_handle_data(data32, data);
+               if (err)
+                       return err;
+
+               return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+                                                       (unsigned long)data);
+       }
+       case COMPAT_ION_IOC_CUSTOM: {
+               struct compat_ion_custom_data __user *data32;
+               struct ion_custom_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_custom_data(data32, data);
+               if (err)
+                       return err;
+
+               return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+                                                       (unsigned long)data);
+       }
+       case ION_IOC_SHARE:
+       case ION_IOC_MAP:
+       case ION_IOC_IMPORT:
+       case ION_IOC_SYNC:
+               return filp->f_op->unlocked_ioctl(filp, cmd,
+                                               (unsigned long)compat_ptr(arg));
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644 (file)
index 0000000..3a9c8c0
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
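Because the !CONFIG_COMPAT branch defines compat_ion_ioctl to NULL, a
file_operations can reference it unconditionally; ion.c wires its fops up this
way. An illustrative sketch (the demo_* names are placeholders):

#include <linux/fs.h>
#include <linux/module.h>

#include "compat_ion.h"

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;         /* stub for the sketch */
}

static const struct file_operations demo_ion_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = demo_ioctl,
        /* NULL when !CONFIG_COMPAT, so no #ifdef is needed here */
        .compat_ioctl   = compat_ion_ioctl,
};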
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644 (file)
index 0000000..48774e3
--- /dev/null
@@ -0,0 +1,1634 @@
+/*
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:               the actual misc device
+ * @buffers:           an rb tree of all the existing buffers
+ * @buffer_lock:       lock protecting the tree of buffers
+ * @lock:              rwsem protecting the tree of heaps and clients
+ * @heaps:             list of all the heaps in the system
+ * @user_clients:      list of all the clients created from userspace
+ */
+struct ion_device {
+       struct miscdevice dev;
+       struct rb_root buffers;
+       struct mutex buffer_lock;
+       struct rw_semaphore lock;
+       struct plist_head heaps;
+       long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+                             unsigned long arg);
+       struct rb_root clients;
+       struct dentry *debug_root;
+       struct dentry *heaps_debug_root;
+       struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:              node in the tree of all clients
+ * @dev:               backpointer to ion device
+ * @handles:           an rb tree of all the handles in this client
+ * @idr:               an idr space for allocating handle ids
+ * @lock:              lock protecting the tree of handles
+ * @name:              used for debugging
+ * @display_name:      used for debugging (unique version of @name)
+ * @display_serial:    used for debugging (to make display_name unique)
+ * @task:              used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+       struct rb_node node;
+       struct ion_device *dev;
+       struct rb_root handles;
+       struct idr idr;
+       struct mutex lock;
+       const char *name;
+       char *display_name;
+       int display_serial;
+       struct task_struct *task;
+       pid_t pid;
+       struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:               reference count
+ * @client:            back pointer to the client the buffer resides in
+ * @buffer:            pointer to the buffer
+ * @node:              node in the client's handle rbtree
+ * @kmap_cnt:          count of times this client has mapped to kernel
+ * @id:                        client-unique id allocated by client->idr
+ *
+ * Modifications to node, kmap_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+       struct kref ref;
+       struct ion_client *client;
+       struct ion_buffer *buffer;
+       struct rb_node node;
+       unsigned int kmap_cnt;
+       int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+       return (buffer->flags & ION_FLAG_CACHED) &&
+               !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+       return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
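+/*
+ * For buffers whose user mappings are faulted in page by page, bit 0
+ * of each buffer->pages[] entry tracks whether that page is dirty.
+ * struct page pointers are at least 4-byte aligned, so the low bit is
+ * otherwise unused.
+ */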
+static inline struct page *ion_buffer_page(struct page *page)
+{
+       return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+       return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+                          struct ion_buffer *buffer)
+{
+       struct rb_node **p = &dev->buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_buffer *entry;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_buffer, node);
+
+               if (buffer < entry) {
+                       p = &(*p)->rb_left;
+               } else if (buffer > entry) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_err("%s: buffer already found.", __func__);
+                       BUG();
+               }
+       }
+
+       rb_link_node(&buffer->node, parent, p);
+       rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+                                    struct ion_device *dev,
+                                    unsigned long len,
+                                    unsigned long align,
+                                    unsigned long flags)
+{
+       struct ion_buffer *buffer;
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int i, ret;
+
+       buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
+
+       buffer->heap = heap;
+       buffer->flags = flags;
+       kref_init(&buffer->ref);
+
+       ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
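+       /*
+        * A heap with deferred free may still be holding reclaimable
+        * buffers on its freelist; drain it and retry the allocation
+        * once before giving up.
+        */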
+       if (ret) {
+               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+                       goto err2;
+
+               ion_heap_freelist_drain(heap, 0);
+               ret = heap->ops->allocate(heap, buffer, len, align,
+                                         flags);
+               if (ret)
+                       goto err2;
+       }
+
+       buffer->dev = dev;
+       buffer->size = len;
+
+       table = heap->ops->map_dma(heap, buffer);
+       if (WARN_ONCE(table == NULL,
+                       "heap->ops->map_dma should return ERR_PTR on error"))
+               table = ERR_PTR(-EINVAL);
+       if (IS_ERR(table)) {
+               heap->ops->free(buffer);
+               kfree(buffer);
+               return ERR_PTR(PTR_ERR(table));
+       }
+       buffer->sg_table = table;
+       if (ion_buffer_fault_user_mappings(buffer)) {
+               int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+               struct scatterlist *sg;
+               int i, j, k = 0;
+
+               buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+               if (!buffer->pages) {
+                       ret = -ENOMEM;
+                       goto err1;
+               }
+
+               for_each_sg(table->sgl, sg, table->nents, i) {
+                       struct page *page = sg_page(sg);
+
+                       for (j = 0; j < sg->length / PAGE_SIZE; j++)
+                               buffer->pages[k++] = page++;
+               }
+
+               if (ret)
+                       goto err;
+       }
+
+       buffer->dev = dev;
+       buffer->size = len;
+       INIT_LIST_HEAD(&buffer->vmas);
+       mutex_init(&buffer->lock);
+       /*
+        * This will set up dma addresses for the sglist -- it is not
+        * technically correct as per the dma api -- a specific device
+        * isn't really taking ownership here.  However, in practice on
+        * our systems the only dma_address space is physical addresses.
+        * Additionally, we can't afford the overhead of invalidating
+        * every allocation via dma_map_sg.  The implicit contract here
+        * is that memory coming from the heaps is ready for dma, i.e.
+        * if it has a cached mapping that mapping has been invalidated.
+        */
+       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+               sg_dma_address(sg) = sg_phys(sg);
+       mutex_lock(&dev->buffer_lock);
+       ion_buffer_add(dev, buffer);
+       mutex_unlock(&dev->buffer_lock);
+       return buffer;
+
+err:
+       heap->ops->unmap_dma(heap, buffer);
+       heap->ops->free(buffer);
+err1:
+       if (buffer->pages)
+               vfree(buffer->pages);
+err2:
+       kfree(buffer);
+       return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+       if (WARN_ON(buffer->kmap_cnt > 0))
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+       buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+       buffer->heap->ops->free(buffer);
+       if (buffer->pages)
+               vfree(buffer->pages);
+       kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+       struct ion_heap *heap = buffer->heap;
+       struct ion_device *dev = buffer->dev;
+
+       mutex_lock(&dev->buffer_lock);
+       rb_erase(&buffer->node, &dev->buffers);
+       mutex_unlock(&dev->buffer_lock);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_freelist_add(heap, buffer);
+       else
+               ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+       kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+       return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+       mutex_lock(&buffer->lock);
+       buffer->handle_count++;
+       mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+       /*
+        * when a buffer is removed from a handle, if it is not in
+        * any other handles, copy the taskcomm and the pid of the
+        * process it's being removed from into the buffer.  At this
+        * point there will be no way to track what processes this buffer is
+        * being used by, it only exists as a dma_buf file descriptor.
+        * The taskcomm and pid can provide a debug hint as to where this fd
+        * is in the system
+        */
+       mutex_lock(&buffer->lock);
+       buffer->handle_count--;
+       BUG_ON(buffer->handle_count < 0);
+       if (!buffer->handle_count) {
+               struct task_struct *task;
+
+               task = current->group_leader;
+               get_task_comm(buffer->task_comm, task);
+               buffer->pid = task_pid_nr(task);
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+                                    struct ion_buffer *buffer)
+{
+       struct ion_handle *handle;
+
+       handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+       if (!handle)
+               return ERR_PTR(-ENOMEM);
+       kref_init(&handle->ref);
+       RB_CLEAR_NODE(&handle->node);
+       handle->client = client;
+       ion_buffer_get(buffer);
+       ion_buffer_add_to_handle(buffer);
+       handle->buffer = buffer;
+
+       return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+       struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+       struct ion_client *client = handle->client;
+       struct ion_buffer *buffer = handle->buffer;
+
+       mutex_lock(&buffer->lock);
+       while (handle->kmap_cnt)
+               ion_handle_kmap_put(handle);
+       mutex_unlock(&buffer->lock);
+
+       idr_remove(&client->idr, handle->id);
+       if (!RB_EMPTY_NODE(&handle->node))
+               rb_erase(&handle->node, &client->handles);
+
+       ion_buffer_remove_from_handle(buffer);
+       ion_buffer_put(buffer);
+
+       kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+       return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+       kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+       struct ion_client *client = handle->client;
+       int ret;
+
+       mutex_lock(&client->lock);
+       ret = kref_put(&handle->ref, ion_handle_destroy);
+       mutex_unlock(&client->lock);
+
+       return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+                                           struct ion_buffer *buffer)
+{
+       struct rb_node *n = client->handles.rb_node;
+
+       while (n) {
+               struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+               if (buffer < entry->buffer)
+                       n = n->rb_left;
+               else if (buffer > entry->buffer)
+                       n = n->rb_right;
+               else
+                       return entry;
+       }
+       return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+                                               int id)
+{
+       struct ion_handle *handle;
+
+       mutex_lock(&client->lock);
+       handle = idr_find(&client->idr, id);
+       if (handle)
+               ion_handle_get(handle);
+       mutex_unlock(&client->lock);
+
+       return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+                               struct ion_handle *handle)
+{
+       WARN_ON(!mutex_is_locked(&client->lock));
+       return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+       int id;
+       struct rb_node **p = &client->handles.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_handle *entry;
+
+       id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       handle->id = id;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_handle, node);
+
+               if (handle->buffer < entry->buffer)
+                       p = &(*p)->rb_left;
+               else if (handle->buffer > entry->buffer)
+                       p = &(*p)->rb_right;
+               else
+                       WARN(1, "%s: buffer already found.", __func__);
+       }
+
+       rb_link_node(&handle->node, parent, p);
+       rb_insert_color(&handle->node, &client->handles);
+
+       return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int heap_id_mask,
+                            unsigned int flags)
+{
+       struct ion_handle *handle;
+       struct ion_device *dev = client->dev;
+       struct ion_buffer *buffer = NULL;
+       struct ion_heap *heap;
+       int ret;
+
+       pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+                len, align, heap_id_mask, flags);
+       /*
+        * traverse the list of heaps available in this system in priority
+        * order.  If the heap type is supported by the client, and matches the
+        * request of the caller allocate from it.  Repeat until allocate has
+        * succeeded or all heaps have been tried
+        */
+       len = PAGE_ALIGN(len);
+
+       if (!len)
+               return ERR_PTR(-EINVAL);
+
+       down_read(&dev->lock);
+       plist_for_each_entry(heap, &dev->heaps, node) {
+               /* if the caller didn't specify this heap id */
+               if (!((1 << heap->id) & heap_id_mask))
+                       continue;
+               buffer = ion_buffer_create(heap, dev, len, align, flags);
+               if (!IS_ERR(buffer))
+                       break;
+       }
+       up_read(&dev->lock);
+
+       if (buffer == NULL)
+               return ERR_PTR(-ENODEV);
+
+       if (IS_ERR(buffer))
+               return ERR_PTR(PTR_ERR(buffer));
+
+       handle = ion_handle_create(client, buffer);
+
+       /*
+        * ion_buffer_create will create a buffer with a ref_cnt of 1,
+        * and ion_handle_create will take a second reference, drop one here
+        */
+       ion_buffer_put(buffer);
+
+       if (IS_ERR(handle))
+               return handle;
+
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
+
+       return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+       bool valid_handle;
+
+       BUG_ON(client != handle->client);
+
+       mutex_lock(&client->lock);
+       valid_handle = ion_handle_validate(client, handle);
+
+       if (!valid_handle) {
+               WARN(1, "%s: invalid handle passed to free.\n", __func__);
+               mutex_unlock(&client->lock);
+               return;
+       }
+       mutex_unlock(&client->lock);
+       ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len)
+{
+       struct ion_buffer *buffer;
+       int ret;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               mutex_unlock(&client->lock);
+               return -EINVAL;
+       }
+
+       buffer = handle->buffer;
+
+       if (!buffer->heap->ops->phys) {
+               pr_err("%s: ion_phys is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return -ENODEV;
+       }
+       mutex_unlock(&client->lock);
+       ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+       return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+       void *vaddr;
+
+       if (buffer->kmap_cnt) {
+               buffer->kmap_cnt++;
+               return buffer->vaddr;
+       }
+       vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+       if (WARN_ONCE(vaddr == NULL,
+                       "heap->ops->map_kernel should return ERR_PTR on error"))
+               return ERR_PTR(-EINVAL);
+       if (IS_ERR(vaddr))
+               return vaddr;
+       buffer->vaddr = vaddr;
+       buffer->kmap_cnt++;
+       return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+       struct ion_buffer *buffer = handle->buffer;
+       void *vaddr;
+
+       if (handle->kmap_cnt) {
+               handle->kmap_cnt++;
+               return buffer->vaddr;
+       }
+       vaddr = ion_buffer_kmap_get(buffer);
+       if (IS_ERR(vaddr))
+               return vaddr;
+       handle->kmap_cnt++;
+       return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+       buffer->kmap_cnt--;
+       if (!buffer->kmap_cnt) {
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+               buffer->vaddr = NULL;
+       }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+       struct ion_buffer *buffer = handle->buffer;
+
+       handle->kmap_cnt--;
+       if (!handle->kmap_cnt)
+               ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       void *vaddr;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_kernel.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+
+       buffer = handle->buffer;
+
+       if (!handle->buffer->heap->ops->map_kernel) {
+               pr_err("%s: map_kernel is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_lock(&buffer->lock);
+       vaddr = ion_handle_kmap_get(handle);
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+       return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+
+       mutex_lock(&client->lock);
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+       ion_handle_kmap_put(handle);
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
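+/*
+ * Typical in-kernel use of the interfaces above (a sketch: "idev" is a
+ * previously created struct ion_device with a system heap registered,
+ * e.g. as ion_dummy_driver does):
+ *
+ *	struct ion_client *client = ion_client_create(idev, "example");
+ *	struct ion_handle *handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
+ *					      ION_HEAP_SYSTEM_MASK,
+ *					      ION_FLAG_CACHED);
+ *	void *vaddr = ion_map_kernel(client, handle);
+ *	...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ */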
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+       struct ion_client *client = s->private;
+       struct rb_node *n;
+       size_t sizes[ION_NUM_HEAP_IDS] = {0};
+       const char *names[ION_NUM_HEAP_IDS] = {NULL};
+       int i;
+
+       mutex_lock(&client->lock);
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               unsigned int id = handle->buffer->heap->id;
+
+               if (!names[id])
+                       names[id] = handle->buffer->heap->name;
+               sizes[id] += handle->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+
+       seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+       for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+               if (!names[i])
+                       continue;
+               seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+       }
+       return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+       .open = ion_debug_client_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int ion_get_client_serial(const struct rb_root *root,
+                                       const unsigned char *name)
+{
+       int serial = -1;
+       struct rb_node *node;
+       for (node = rb_first(root); node; node = rb_next(node)) {
+               struct ion_client *client = rb_entry(node, struct ion_client,
+                                               node);
+               if (strcmp(client->name, name))
+                       continue;
+               serial = max(serial, client->display_serial);
+       }
+       return serial + 1;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+                                    const char *name)
+{
+       struct ion_client *client;
+       struct task_struct *task;
+       struct rb_node **p;
+       struct rb_node *parent = NULL;
+       struct ion_client *entry;
+       pid_t pid;
+
+       if (!name) {
+               pr_err("%s: Name cannot be null\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       get_task_struct(current->group_leader);
+       task_lock(current->group_leader);
+       pid = task_pid_nr(current->group_leader);
+       /*
+        * don't bother to store task struct for kernel threads,
+        * they can't be killed anyway
+        */
+       if (current->group_leader->flags & PF_KTHREAD) {
+               put_task_struct(current->group_leader);
+               task = NULL;
+       } else {
+               task = current->group_leader;
+       }
+       task_unlock(current->group_leader);
+
+       client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+       if (!client)
+               goto err_put_task_struct;
+
+       client->dev = dev;
+       client->handles = RB_ROOT;
+       idr_init(&client->idr);
+       mutex_init(&client->lock);
+       client->task = task;
+       client->pid = pid;
+       client->name = kstrdup(name, GFP_KERNEL);
+       if (!client->name)
+               goto err_free_client;
+
+       down_write(&dev->lock);
+       client->display_serial = ion_get_client_serial(&dev->clients, name);
+       client->display_name = kasprintf(
+               GFP_KERNEL, "%s-%d", name, client->display_serial);
+       if (!client->display_name) {
+               up_write(&dev->lock);
+               goto err_free_client_name;
+       }
+       p = &dev->clients.rb_node;
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_client, node);
+
+               if (client < entry)
+                       p = &(*p)->rb_left;
+               else if (client > entry)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&client->node, parent, p);
+       rb_insert_color(&client->node, &dev->clients);
+
+       client->debug_root = debugfs_create_file(client->display_name, 0664,
+                                               dev->clients_debug_root,
+                                               client, &debug_client_fops);
+       if (!client->debug_root) {
+               char buf[256], *path;
+               path = dentry_path(dev->clients_debug_root, buf, 256);
+               pr_err("Failed to create client debugfs at %s/%s\n",
+                       path, client->display_name);
+       }
+
+       up_write(&dev->lock);
+
+       return client;
+
+err_free_client_name:
+       kfree(client->name);
+err_free_client:
+       kfree(client);
+err_put_task_struct:
+       if (task)
+               put_task_struct(current->group_leader);
+       return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+       struct ion_device *dev = client->dev;
+       struct rb_node *n;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       while ((n = rb_first(&client->handles))) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               ion_handle_destroy(&handle->ref);
+       }
+
+       idr_destroy(&client->idr);
+
+       down_write(&dev->lock);
+       if (client->task)
+               put_task_struct(client->task);
+       rb_erase(&client->node, &dev->clients);
+       debugfs_remove_recursive(client->debug_root);
+       up_write(&dev->lock);
+
+       kfree(client->display_name);
+       kfree(client->name);
+       kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+                             struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       struct sg_table *table;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_dma.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = handle->buffer;
+       table = buffer->sg_table;
+       mutex_unlock(&client->lock);
+       return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+                                      struct device *dev,
+                                      enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+                                       enum dma_data_direction direction)
+{
+       struct dma_buf *dmabuf = attachment->dmabuf;
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+       return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                             struct sg_table *table,
+                             enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir)
+{
+       struct scatterlist sg;
+
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       /*
+        * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+        * for the targeted device, but this works on the currently targeted
+        * hardware.
+        */
+       sg_dma_address(&sg) = page_to_phys(page);
+       dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+       struct list_head list;
+       struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+                                      struct device *dev,
+                                      enum dma_data_direction dir)
+{
+       struct ion_vma_list *vma_list;
+       int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+       int i;
+
+       pr_debug("%s: syncing for device %s\n", __func__,
+                dev ? dev_name(dev) : "null");
+
+       if (!ion_buffer_fault_user_mappings(buffer))
+               return;
+
+       mutex_lock(&buffer->lock);
+       for (i = 0; i < pages; i++) {
+               struct page *page = buffer->pages[i];
+
+               if (ion_buffer_page_is_dirty(page))
+                       ion_pages_sync_for_device(dev, ion_buffer_page(page),
+                                                       PAGE_SIZE, dir);
+
+               ion_buffer_page_clean(buffer->pages + i);
+       }
+       list_for_each_entry(vma_list, &buffer->vmas, list) {
+               struct vm_area_struct *vma = vma_list->vma;
+
+               zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+                              NULL);
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       unsigned long pfn;
+       int ret;
+
+       mutex_lock(&buffer->lock);
+       ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+       BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+       pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       mutex_unlock(&buffer->lock);
+       if (ret)
+               return VM_FAULT_ERROR;
+
+       return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       struct ion_vma_list *vma_list;
+
+       vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+       if (!vma_list)
+               return;
+       vma_list->vma = vma;
+       mutex_lock(&buffer->lock);
+       list_add(&vma_list->list, &buffer->vmas);
+       mutex_unlock(&buffer->lock);
+       pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       struct ion_vma_list *vma_list, *tmp;
+
+       pr_debug("%s\n", __func__);
+       mutex_lock(&buffer->lock);
+       list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+               if (vma_list->vma != vma)
+                       continue;
+               list_del(&vma_list->list);
+               kfree(vma_list);
+               pr_debug("%s: deleting %p\n", __func__, vma);
+               break;
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+       .open = ion_vm_open,
+       .close = ion_vm_close,
+       .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       int ret = 0;
+
+       if (!buffer->heap->ops->map_user) {
+               pr_err("%s: this heap does not define a method for mapping "
+                      "to userspace\n", __func__);
+               return -EINVAL;
+       }
+
+       if (ion_buffer_fault_user_mappings(buffer)) {
+               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+                                                       VM_DONTDUMP;
+               vma->vm_private_data = buffer;
+               vma->vm_ops = &ion_vma_ops;
+               ion_vm_open(vma);
+               return 0;
+       }
+
+       if (!(buffer->flags & ION_FLAG_CACHED))
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+       mutex_lock(&buffer->lock);
+       /* now map it to userspace */
+       ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+       mutex_unlock(&buffer->lock);
+
+       if (ret)
+               pr_err("%s: failure mapping buffer to userspace\n",
+                      __func__);
+
+       return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+                              void *ptr)
+{
+       return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+                                       size_t len,
+                                       enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       void *vaddr;
+
+       if (!buffer->heap->ops->map_kernel) {
+               pr_err("%s: map kernel is not implemented by this heap.\n",
+                      __func__);
+               return -ENODEV;
+       }
+
+       mutex_lock(&buffer->lock);
+       vaddr = ion_buffer_kmap_get(buffer);
+       mutex_unlock(&buffer->lock);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+       return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+                                      size_t len,
+                                      enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       mutex_lock(&buffer->lock);
+       ion_buffer_kmap_put(buffer);
+       mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+       .map_dma_buf = ion_map_dma_buf,
+       .unmap_dma_buf = ion_unmap_dma_buf,
+       .mmap = ion_mmap,
+       .release = ion_dma_buf_release,
+       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+       .end_cpu_access = ion_dma_buf_end_cpu_access,
+       .kmap_atomic = ion_dma_buf_kmap,
+       .kunmap_atomic = ion_dma_buf_kunmap,
+       .kmap = ion_dma_buf_kmap,
+       .kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+                                               struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       struct dma_buf *dmabuf;
+       bool valid_handle;
+
+       mutex_lock(&client->lock);
+       valid_handle = ion_handle_validate(client, handle);
+       if (!valid_handle) {
+               WARN(1, "%s: invalid handle passed to share.\n", __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = handle->buffer;
+       ion_buffer_get(buffer);
+       mutex_unlock(&client->lock);
+
+       dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+       if (IS_ERR(dmabuf)) {
+               ion_buffer_put(buffer);
+               return dmabuf;
+       }
+
+       return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+       struct dma_buf *dmabuf;
+       int fd;
+
+       dmabuf = ion_share_dma_buf(client, handle);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+       if (fd < 0)
+               dma_buf_put(dmabuf);
+
+       return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+       struct dma_buf *dmabuf;
+       struct ion_buffer *buffer;
+       struct ion_handle *handle;
+       int ret;
+
+       dmabuf = dma_buf_get(fd);
+       if (IS_ERR(dmabuf))
+               return ERR_CAST(dmabuf);
+
+       /* if this memory came from ion */
+       if (dmabuf->ops != &dma_buf_ops) {
+               pr_err("%s: can not import dmabuf from another exporter\n",
+                      __func__);
+               dma_buf_put(dmabuf);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = dmabuf->priv;
+
+       mutex_lock(&client->lock);
+       /* if a handle exists for this buffer just take a reference to it */
+       handle = ion_handle_lookup(client, buffer);
+       if (!IS_ERR(handle)) {
+               ion_handle_get(handle);
+               mutex_unlock(&client->lock);
+               goto end;
+       }
+       mutex_unlock(&client->lock);
+
+       handle = ion_handle_create(client, buffer);
+       if (IS_ERR(handle))
+               goto end;
+
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
+
+end:
+       dma_buf_put(dmabuf);
+       return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+       struct dma_buf *dmabuf;
+       struct ion_buffer *buffer;
+
+       dmabuf = dma_buf_get(fd);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       /* if this memory came from ion */
+       if (dmabuf->ops != &dma_buf_ops) {
+               pr_err("%s: can not sync dmabuf from another exporter\n",
+                      __func__);
+               dma_buf_put(dmabuf);
+               return -EINVAL;
+       }
+       buffer = dmabuf->priv;
+
+       dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+                              buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+       dma_buf_put(dmabuf);
+       return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+       switch (cmd) {
+       case ION_IOC_SYNC:
+       case ION_IOC_FREE:
+       case ION_IOC_CUSTOM:
+               return _IOC_WRITE;
+       default:
+               return _IOC_DIR(cmd);
+       }
+}
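+
+/*
+ * For example, ION_IOC_FREE is declared as an _IOWR() command in the uapi
+ * header even though the kernel only reads its argument; without this
+ * fix-up, _IOC_DIR() would also request a copy back to userspace.
+ */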
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct ion_client *client = filp->private_data;
+       struct ion_device *dev = client->dev;
+       struct ion_handle *cleanup_handle = NULL;
+       int ret = 0;
+       unsigned int dir;
+
+       union {
+               struct ion_fd_data fd;
+               struct ion_allocation_data allocation;
+               struct ion_handle_data handle;
+               struct ion_custom_data custom;
+       } data;
+
+       dir = ion_ioctl_dir(cmd);
+
+       if (_IOC_SIZE(cmd) > sizeof(data))
+               return -EINVAL;
+
+       if (dir & _IOC_WRITE)
+               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+
+       switch (cmd) {
+       case ION_IOC_ALLOC:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_alloc(client, data.allocation.len,
+                                               data.allocation.align,
+                                               data.allocation.heap_id_mask,
+                                               data.allocation.flags);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+
+               data.allocation.handle = handle->id;
+
+               cleanup_handle = handle;
+               break;
+       }
+       case ION_IOC_FREE:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               ion_free(client, handle);
+               ion_handle_put(handle);
+               break;
+       }
+       case ION_IOC_SHARE:
+       case ION_IOC_MAP:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               data.fd.fd = ion_share_dma_buf_fd(client, handle);
+               ion_handle_put(handle);
+               if (data.fd.fd < 0)
+                       ret = data.fd.fd;
+               break;
+       }
+       case ION_IOC_IMPORT:
+       {
+               struct ion_handle *handle;
+               handle = ion_import_dma_buf(client, data.fd.fd);
+               if (IS_ERR(handle))
+                       ret = PTR_ERR(handle);
+               else
+                       data.handle.handle = handle->id;
+               break;
+       }
+       case ION_IOC_SYNC:
+       {
+               ret = ion_sync_for_device(client, data.fd.fd);
+               break;
+       }
+       case ION_IOC_CUSTOM:
+       {
+               if (!dev->custom_ioctl)
+                       return -ENOTTY;
+               ret = dev->custom_ioctl(client, data.custom.cmd,
+                                               data.custom.arg);
+               break;
+       }
+       default:
+               return -ENOTTY;
+       }
+
+       if (dir & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+                       if (cleanup_handle)
+                               ion_free(client, cleanup_handle);
+                       return -EFAULT;
+               }
+       }
+       return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+       struct ion_client *client = file->private_data;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       ion_client_destroy(client);
+       return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+       struct miscdevice *miscdev = file->private_data;
+       struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+       struct ion_client *client;
+       char debug_name[64];
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+       client = ion_client_create(dev, debug_name);
+       if (IS_ERR(client))
+               return PTR_ERR(client);
+       file->private_data = client;
+
+       return 0;
+}
+
+static const struct file_operations ion_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ion_open,
+       .release        = ion_release,
+       .unlocked_ioctl = ion_ioctl,
+       .compat_ioctl   = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+                                  unsigned int id)
+{
+       size_t size = 0;
+       struct rb_node *n;
+
+       mutex_lock(&client->lock);
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n,
+                                                    struct ion_handle,
+                                                    node);
+               if (handle->buffer->heap->id == id)
+                       size += handle->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+       return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+       struct ion_heap *heap = s->private;
+       struct ion_device *dev = heap->dev;
+       struct rb_node *n;
+       size_t total_size = 0;
+       size_t total_orphaned_size = 0;
+
+       seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+       seq_printf(s, "----------------------------------------------------\n");
+
+       for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+               struct ion_client *client = rb_entry(n, struct ion_client,
+                                                    node);
+               size_t size = ion_debug_heap_total(client, heap->id);
+               if (!size)
+                       continue;
+               if (client->task) {
+                       char task_comm[TASK_COMM_LEN];
+
+                       get_task_comm(task_comm, client->task);
+                       seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+                                  client->pid, size);
+               } else {
+                       seq_printf(s, "%16.s %16u %16zu\n", client->name,
+                                  client->pid, size);
+               }
+       }
+       seq_printf(s, "----------------------------------------------------\n");
+       seq_printf(s, "orphaned allocations (info is from last known client):"
+                  "\n");
+       mutex_lock(&dev->buffer_lock);
+       for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+               struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+                                                    node);
+               if (buffer->heap->id != heap->id)
+                       continue;
+               total_size += buffer->size;
+               if (!buffer->handle_count) {
+                       seq_printf(s, "%16.s %16u %16zu %d %d\n",
+                                  buffer->task_comm, buffer->pid,
+                                  buffer->size, buffer->kmap_cnt,
+                                  atomic_read(&buffer->ref.refcount));
+                       total_orphaned_size += buffer->size;
+               }
+       }
+       mutex_unlock(&dev->buffer_lock);
+       seq_printf(s, "----------------------------------------------------\n");
+       seq_printf(s, "%16.s %16zu\n", "total orphaned",
+                  total_orphaned_size);
+       seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               seq_printf(s, "%16.s %16zu\n", "deferred free",
+                               heap->free_list_size);
+       seq_printf(s, "----------------------------------------------------\n");
+
+       if (heap->debug_show)
+               heap->debug_show(heap, s, unused);
+
+       return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+       .open = ion_debug_heap_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+       struct ion_heap *heap = data;
+       struct shrink_control sc;
+       int objs;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       if (!val)
+               return 0;
+
+       objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+       sc.nr_to_scan = objs;
+
+       heap->shrinker.shrink(&heap->shrinker, &sc);
+       return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+       struct ion_heap *heap = data;
+       struct shrink_control sc;
+       int objs;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+       *val = objs;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+                       debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+       struct dentry *debug_file;
+
+       if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+           !heap->ops->unmap_dma)
+               pr_err("%s: can not add heap with invalid ops struct.\n",
+                      __func__);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_init_deferred_free(heap);
+
+       if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+               ion_heap_init_shrinker(heap);
+
+       heap->dev = dev;
+       down_write(&dev->lock);
+       /*
+        * use negative heap->id to reverse the priority -- when traversing
+        * the list later attempt higher id numbers first
+        */
+       plist_node_init(&heap->node, -heap->id);
+       plist_add(&heap->node, &dev->heaps);
+       debug_file = debugfs_create_file(heap->name, 0664,
+                                       dev->heaps_debug_root, heap,
+                                       &debug_heap_fops);
+
+       if (!debug_file) {
+               char buf[256], *path;
+               path = dentry_path(dev->heaps_debug_root, buf, 256);
+               pr_err("Failed to create heap debugfs at %s/%s\n",
+                       path, heap->name);
+       }
+
+#ifdef DEBUG_HEAP_SHRINKER
+       if (heap->shrinker.shrink) {
+               char debug_name[64];
+
+               snprintf(debug_name, 64, "%s_shrink", heap->name);
+               debug_file = debugfs_create_file(
+                       debug_name, 0644, dev->heaps_debug_root, heap,
+                       &debug_shrink_fops);
+               if (!debug_file) {
+                       char buf[256], *path;
+                       path = dentry_path(dev->heaps_debug_root, buf, 256);
+                       pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+                               path, debug_name);
+               }
+       }
+#endif
+       up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+                                    (struct ion_client *client,
+                                     unsigned int cmd,
+                                     unsigned long arg))
+{
+       struct ion_device *idev;
+       int ret;
+
+       idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+       if (!idev)
+               return ERR_PTR(-ENOMEM);
+
+       idev->dev.minor = MISC_DYNAMIC_MINOR;
+       idev->dev.name = "ion";
+       idev->dev.fops = &ion_fops;
+       idev->dev.parent = NULL;
+       ret = misc_register(&idev->dev);
+       if (ret) {
+               pr_err("ion: failed to register misc device.\n");
+               return ERR_PTR(ret);
+       }
+
+       idev->debug_root = debugfs_create_dir("ion", NULL);
+       if (!idev->debug_root) {
+               pr_err("ion: failed to create debugfs root directory.\n");
+               goto debugfs_done;
+       }
+       idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+       if (!idev->heaps_debug_root) {
+               pr_err("ion: failed to create debugfs heaps directory.\n");
+               goto debugfs_done;
+       }
+       idev->clients_debug_root = debugfs_create_dir("clients",
+                                               idev->debug_root);
+       if (!idev->clients_debug_root)
+               pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
+
+       idev->custom_ioctl = custom_ioctl;
+       idev->buffers = RB_ROOT;
+       mutex_init(&idev->buffer_lock);
+       init_rwsem(&idev->lock);
+       plist_head_init(&idev->heaps);
+       idev->clients = RB_ROOT;
+       return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+       misc_deregister(&dev->dev);
+       debugfs_remove_recursive(dev->debug_root);
+       /* XXX need to free the heaps and clients ? */
+       kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+       int i;
+
+       for (i = 0; i < data->nr; i++) {
+               if (data->heaps[i].size == 0)
+                       continue;
+
+               if (data->heaps[i].base == 0) {
+                       phys_addr_t paddr;
+                       paddr = memblock_alloc_base(data->heaps[i].size,
+                                                   data->heaps[i].align,
+                                                   MEMBLOCK_ALLOC_ANYWHERE);
+                       if (!paddr) {
+                               pr_err("%s: error allocating memblock for "
+                                      "heap %d\n",
+                                       __func__, i);
+                               continue;
+                       }
+                       data->heaps[i].base = paddr;
+               } else {
+                       int ret = memblock_reserve(data->heaps[i].base,
+                                              data->heaps[i].size);
+                       if (ret)
+                               pr_err("memblock reserve of %zx@%lx failed\n",
+                                      data->heaps[i].size,
+                                      data->heaps[i].base);
+               }
+               pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+                       data->heaps[i].name,
+                       data->heaps[i].base,
+                       data->heaps[i].size);
+       }
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644 (file)
index 0000000..dcd2a0c
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/*
+ * This should be removed some day when phys_addr_t's are fully
+ * plumbed in the kernel, and all instances of ion_phys_addr_t should
+ * be converted to phys_addr_t.  For the time being many kernel
+ * interfaces do not accept phys_addr_t's.
+ */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:      type of the heap from ion_heap_type enum
+ * @id:                unique identifier for heap.  When allocating, heaps with
+ *             higher id numbers are tried first.  At allocation these are
+ *             passed as a bit mask and therefore can not exceed
+ *             ION_NUM_HEAP_IDS.
+ * @name:      used for debug purposes
+ * @base:      base address of heap in physical memory if applicable
+ * @size:      size of the heap in bytes if applicable
+ * @align:     required alignment in physical memory if applicable
+ * @priv:      private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+       enum ion_heap_type type;
+       unsigned int id;
+       const char *name;
+       ion_phys_addr_t base;
+       size_t size;
+       ion_phys_addr_t align;
+       void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:                number of structures in the array
+ * @heaps:     array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+       int nr;
+       struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:      platform data specifying starting physical address and
+ *             size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel.
+ */
+void ion_reserve(struct ion_platform_data *data);
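+
+/*
+ * Usage sketch (illustrative, not part of this patch): a board file could
+ * reserve a carveout region at early boot, typically from the machine's
+ * ->reserve hook while memblock is still live.  The heap table, base
+ * address and sizes below are assumptions for illustration only.
+ *
+ *     static struct ion_platform_heap board_heaps[] = {
+ *             { .type = ION_HEAP_TYPE_CARVEOUT, .id = 1,
+ *               .name = "carveout", .base = 0x80000000, .size = SZ_16M },
+ *     };
+ *     static struct ion_platform_data board_ion_data = {
+ *             .nr = ARRAY_SIZE(board_heaps),
+ *             .heaps = board_heaps,
+ *     };
+ *
+ *     ion_reserve(&board_ion_data);
+ */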
+
+/**
+ * ion_client_create() -  allocate a client and return it
+ * @dev:               the global ion device
+ * @name:              used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+                                    const char *name);
+
+/**
+ * ion_client_destroy() -  frees a client and all its handles
+ * @client:    the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
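+
+/*
+ * Usage sketch (illustrative): an in-kernel user pairs these two calls;
+ * "idev" and the client name are assumed names for illustration.
+ *
+ *     struct ion_client *client;
+ *
+ *     client = ion_client_create(idev, "my-driver");
+ *     if (IS_ERR(client))
+ *             return PTR_ERR(client);
+ *     ...
+ *     ion_client_destroy(client);
+ */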
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:            the client
+ * @len:               size of the allocation
+ * @align:             requested allocation alignment; many hardware blocks
+ *                     have alignment requirements
+ * @heap_id_mask:      mask of heaps to allocate from, if multiple bits are set
+ *                     heaps will be tried in order from highest to lowest
+ *                     id
+ * @flags:             heap flags; the low 16 bits are consumed by ion, the
+ *                     high 16 bits are passed on to the respective heap and
+ *                     are heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int heap_id_mask,
+                            unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:    the client
+ * @handle:    the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
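+
+/*
+ * Usage sketch (illustrative): allocate one page-aligned megabyte from the
+ * system heap, then release it.  The heap id mask assumes the heap was
+ * registered with id ION_HEAP_TYPE_SYSTEM, as the dummy driver does.
+ *
+ *     struct ion_handle *handle;
+ *
+ *     handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
+ *                        1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
+ *     if (IS_ERR(handle))
+ *             return PTR_ERR(handle);
+ *     ...
+ *     ion_free(client, handle);
+ */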
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:    the client
+ * @handle:    the handle
+ * @addr:      a pointer to put the address in
+ * @len:       a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len);
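+
+/*
+ * Usage sketch (illustrative): only meaningful for heaps that return
+ * physically contiguous memory, e.g. carveout; the caller must hold a
+ * reference to the handle while using the address.
+ * my_device_set_dma_addr() is a hypothetical driver helper.
+ *
+ *     ion_phys_addr_t addr;
+ *     size_t len;
+ *
+ *     if (!ion_phys(client, handle, &addr, &len))
+ *             my_device_set_dma_addr(addr, len);
+ */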
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client:    the client
+ * @handle:    the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+                             struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:    the client
+ * @handle:    handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access the buffer's memory.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:    the client
+ * @handle:    handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
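+
+/*
+ * Usage sketch (illustrative): CPU access through a kernel mapping;
+ * buf_len stands in for the caller's known buffer size.
+ *
+ *     void *vaddr = ion_map_kernel(client, handle);
+ *
+ *     if (IS_ERR(vaddr))
+ *             return PTR_ERR(vaddr);
+ *     memset(vaddr, 0, buf_len);
+ *     ion_unmap_kernel(client, handle);
+ */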
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:    the client
+ * @handle:    the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+                                               struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:    the client
+ * @handle:    the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client:    the client
+ * @fd:                the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
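+
+/*
+ * Usage sketch (illustrative): hand a buffer out as a dma-buf fd and
+ * re-import it into a second client; client_a/client_b are assumed names.
+ *
+ *     struct ion_handle *imported;
+ *     int fd;
+ *
+ *     fd = ion_share_dma_buf_fd(client_a, handle);
+ *     if (fd < 0)
+ *             return fd;
+ *     imported = ion_import_dma_buf(client_b, fd);
+ *     if (IS_ERR(imported))
+ *             return PTR_ERR(imported);
+ */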
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644 (file)
index 0000000..5165de2
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * drivers/gpu/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+       struct ion_heap heap;
+       struct gen_pool *pool;
+       ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+                                     unsigned long size,
+                                     unsigned long align)
+{
+       struct ion_carveout_heap *carveout_heap =
+               container_of(heap, struct ion_carveout_heap, heap);
+       unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+       if (!offset)
+               return ION_CARVEOUT_ALLOCATE_FAIL;
+
+       return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+                      unsigned long size)
+{
+       struct ion_carveout_heap *carveout_heap =
+               container_of(heap, struct ion_carveout_heap, heap);
+
+       if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+               return;
+       gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+                                 struct ion_buffer *buffer,
+                                 ion_phys_addr_t *addr, size_t *len)
+{
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+       *addr = paddr;
+       *len = buffer->size;
+       return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long size, unsigned long align,
+                                     unsigned long flags)
+{
+       struct sg_table *table;
+       ion_phys_addr_t paddr;
+       int ret;
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto err_free;
+
+       paddr = ion_carveout_allocate(heap, size, align);
+       if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+               ret = -ENOMEM;
+               goto err_free_table;
+       }
+
+       sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+       buffer->priv_virt = table;
+
+       return 0;
+
+err_free_table:
+       sg_free_table(table);
+err_free:
+       kfree(table);
+       return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+       ion_heap_buffer_zero(buffer);
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+                                                       DMA_BIDIRECTIONAL);
+
+       ion_carveout_free(heap, paddr, buffer->size);
+       sg_free_table(table);
+       kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+                                                 struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+                                       struct ion_buffer *buffer)
+{
+       return;
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+       .allocate = ion_carveout_heap_allocate,
+       .free = ion_carveout_heap_free,
+       .phys = ion_carveout_heap_phys,
+       .map_dma = ion_carveout_heap_map_dma,
+       .unmap_dma = ion_carveout_heap_unmap_dma,
+       .map_user = ion_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_carveout_heap *carveout_heap;
+       int ret;
+       struct page *page;
+       size_t size;
+
+       page = pfn_to_page(PFN_DOWN(heap_data->base));
+       size = heap_data->size;
+
+       ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+       ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+       if (ret)
+               return ERR_PTR(ret);
+
+       carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+       if (!carveout_heap)
+               return ERR_PTR(-ENOMEM);
+
+       carveout_heap->pool = gen_pool_create(12, -1);
+       if (!carveout_heap->pool) {
+               kfree(carveout_heap);
+               return ERR_PTR(-ENOMEM);
+       }
+       carveout_heap->base = heap_data->base;
+       gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+                    -1);
+       carveout_heap->heap.ops = &carveout_heap_ops;
+       carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+       carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+       return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_carveout_heap *carveout_heap =
+            container_of(heap, struct  ion_carveout_heap, heap);
+
+       gen_pool_destroy(carveout_heap->pool);
+       kfree(carveout_heap);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644 (file)
index 0000000..ca20d62
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+       struct ion_heap heap;
+       struct gen_pool *pool;
+       ion_phys_addr_t base;
+       unsigned long chunk_size;
+       unsigned long size;
+       unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long size, unsigned long align,
+                                     unsigned long flags)
+{
+       struct ion_chunk_heap *chunk_heap =
+               container_of(heap, struct ion_chunk_heap, heap);
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int ret, i;
+       unsigned long num_chunks;
+       unsigned long allocated_size;
+
+       if (align > chunk_heap->chunk_size)
+               return -EINVAL;
+
+       allocated_size = ALIGN(size, chunk_heap->chunk_size);
+       num_chunks = allocated_size / chunk_heap->chunk_size;
+
+       if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+               return -ENOMEM;
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+       ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+       if (ret) {
+               kfree(table);
+               return ret;
+       }
+
+       sg = table->sgl;
+       for (i = 0; i < num_chunks; i++) {
+               unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+                                                    chunk_heap->chunk_size);
+               if (!paddr)
+                       goto err;
+               sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+                               chunk_heap->chunk_size, 0);
+               sg = sg_next(sg);
+       }
+
+       buffer->priv_virt = table;
+       chunk_heap->allocated += allocated_size;
+       return 0;
+err:
+       sg = table->sgl;
+       for (i -= 1; i >= 0; i--) {
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                             sg->length);
+               sg = sg_next(sg);
+       }
+       sg_free_table(table);
+       kfree(table);
+       return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct ion_chunk_heap *chunk_heap =
+               container_of(heap, struct ion_chunk_heap, heap);
+       struct sg_table *table = buffer->priv_virt;
+       struct scatterlist *sg;
+       int i;
+       unsigned long allocated_size;
+
+       allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+       ion_heap_buffer_zero(buffer);
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+                                                               DMA_BIDIRECTIONAL);
+
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                             sg->length);
+       }
+       chunk_heap->allocated -= allocated_size;
+       sg_free_table(table);
+       kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+                                              struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+                                    struct ion_buffer *buffer)
+{
+       return;
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+       .allocate = ion_chunk_heap_allocate,
+       .free = ion_chunk_heap_free,
+       .map_dma = ion_chunk_heap_map_dma,
+       .unmap_dma = ion_chunk_heap_unmap_dma,
+       .map_user = ion_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_chunk_heap *chunk_heap;
+       int ret;
+       struct page *page;
+       size_t size;
+
+       page = pfn_to_page(PFN_DOWN(heap_data->base));
+       size = heap_data->size;
+
+       ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+       ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+       if (ret)
+               return ERR_PTR(ret);
+
+       chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+       if (!chunk_heap)
+               return ERR_PTR(-ENOMEM);
+
+       chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+       chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+                                          PAGE_SHIFT, -1);
+       if (!chunk_heap->pool) {
+               ret = -ENOMEM;
+               goto error_gen_pool_create;
+       }
+       chunk_heap->base = heap_data->base;
+       chunk_heap->size = heap_data->size;
+       chunk_heap->allocated = 0;
+
+       gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+       chunk_heap->heap.ops = &chunk_heap_ops;
+       chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+       chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+               heap_data->size, heap_data->align);
+
+       return &chunk_heap->heap;
+
+error_gen_pool_create:
+       kfree(chunk_heap);
+       return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_chunk_heap *chunk_heap =
+            container_of(heap, struct  ion_chunk_heap, heap);
+
+       gen_pool_destroy(chunk_heap->pool);
+       kfree(chunk_heap);
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644 (file)
index 0000000..4418bda
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+       struct ion_heap heap;
+       struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+       void *cpu_addr;
+       dma_addr_t handle;
+       struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                              void *cpu_addr, dma_addr_t handle, size_t size)
+{
+       struct page *page = virt_to_page(cpu_addr);
+       int ret;
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (unlikely(ret))
+               return ret;
+
+       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+                           unsigned long len, unsigned long align,
+                           unsigned long flags)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info;
+
+       dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+       if (buffer->flags & ION_FLAG_CACHED)
+               return -EINVAL;
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+       if (!info) {
+               dev_err(dev, "Can't allocate buffer info\n");
+               return ION_CMA_ALLOCATE_FAILED;
+       }
+
+       info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+                                               GFP_HIGHUSER | __GFP_ZERO);
+
+       if (!info->cpu_addr) {
+               dev_err(dev, "Fail to allocate buffer\n");
+               goto err;
+       }
+
+       info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!info->table) {
+               dev_err(dev, "Fail to allocate sg table\n");
+               goto free_mem;
+       }
+
+       if (ion_cma_get_sgtable
+           (dev, info->table, info->cpu_addr, info->handle, len))
+               goto free_table;
+       /* keep this for memory release */
+       buffer->priv_virt = info;
+       dev_dbg(dev, "Allocate buffer %p\n", buffer);
+       return 0;
+
+free_table:
+       kfree(info->table);
+free_mem:
+       dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+       kfree(info);
+       return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       dev_dbg(dev, "Release buffer %p\n", buffer);
+       /* release memory */
+       dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+       /* release sg table */
+       sg_free_table(info->table);
+       kfree(info->table);
+       kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+                       ion_phys_addr_t *addr, size_t *len)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+               &info->handle);
+
+       *addr = info->handle;
+       *len = buffer->size;
+
+       return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+                                            struct ion_buffer *buffer)
+{
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+                                  struct ion_buffer *buffer)
+{
+       return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+                       struct vm_area_struct *vma)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+                                buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+                               struct ion_buffer *buffer)
+{
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+       /* kernel memory mapping has been done at allocation time */
+       return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+                                       struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+       .allocate = ion_cma_allocate,
+       .free = ion_cma_free,
+       .map_dma = ion_cma_heap_map_dma,
+       .unmap_dma = ion_cma_heap_unmap_dma,
+       .phys = ion_cma_phys,
+       .map_user = ion_cma_mmap,
+       .map_kernel = ion_cma_map_kernel,
+       .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+       struct ion_cma_heap *cma_heap;
+
+       cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+       if (!cma_heap)
+               return ERR_PTR(-ENOMEM);
+
+       cma_heap->heap.ops = &ion_cma_ops;
+       /* get device from private heaps data, later it will be
+        * used to make the link with reserved CMA memory */
+       cma_heap->dev = data->priv;
+       cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+       return &cma_heap->heap;
+}
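+
+/*
+ * Usage sketch (hypothetical board wiring, not part of this patch): the
+ * platform stores the struct device backing the reserved CMA region
+ * (cma_pdev below is assumed) in the heap's priv field, which
+ * ion_cma_heap_create() picks up above:
+ *
+ *     static struct ion_platform_heap cma_heap_data = {
+ *             .type = ION_HEAP_TYPE_DMA,
+ *             .id   = ION_HEAP_TYPE_DMA,
+ *             .name = "cma",
+ *             .priv = &cma_pdev->dev,
+ *     };
+ *     struct ion_heap *heap = ion_cma_heap_create(&cma_heap_data);
+ */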
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+       kfree(cma_heap);
+}
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644 (file)
index 0000000..55b2002
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * drivers/gpu/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static struct ion_device *idev;
+static struct ion_heap **heaps;
+
+static void *carveout_ptr;
+static void *chunk_ptr;
+
+static struct ion_platform_heap dummy_heaps[] = {
+               {
+                       .id     = ION_HEAP_TYPE_SYSTEM,
+                       .type   = ION_HEAP_TYPE_SYSTEM,
+                       .name   = "system",
+               },
+               {
+                       .id     = ION_HEAP_TYPE_SYSTEM_CONTIG,
+                       .type   = ION_HEAP_TYPE_SYSTEM_CONTIG,
+                       .name   = "system contig",
+               },
+               {
+                       .id     = ION_HEAP_TYPE_CARVEOUT,
+                       .type   = ION_HEAP_TYPE_CARVEOUT,
+                       .name   = "carveout",
+                       .size   = SZ_4M,
+               },
+               {
+                       .id     = ION_HEAP_TYPE_CHUNK,
+                       .type   = ION_HEAP_TYPE_CHUNK,
+                       .name   = "chunk",
+                       .size   = SZ_4M,
+                       .align  = SZ_16K,
+                       .priv   = (void *)(SZ_16K),
+               },
+};
+
+static struct ion_platform_data dummy_ion_pdata = {
+       .nr = ARRAY_SIZE(dummy_heaps),
+       .heaps = dummy_heaps,
+};
+
+static int __init ion_dummy_init(void)
+{
+       int i, err;
+
+       idev = ion_device_create(NULL);
+       if (IS_ERR(idev))
+               return PTR_ERR(idev);
+
+       heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
+                       GFP_KERNEL);
+       if (!heaps)
+               return -ENOMEM;
+
+       /* Allocate a dummy carveout heap */
+       carveout_ptr = alloc_pages_exact(
+                               dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+                               GFP_KERNEL);
+       if (carveout_ptr)
+               dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+                                               virt_to_phys(carveout_ptr);
+       else
+               pr_err("ion_dummy: Could not allocate carveout\n");
+
+       /* Allocate a dummy chunk heap */
+       chunk_ptr = alloc_pages_exact(
+                               dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+                               GFP_KERNEL);
+       if (chunk_ptr)
+               dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+       else
+               pr_err("ion_dummy: Could not allocate chunk\n");
+
+       for (i = 0; i < dummy_ion_pdata.nr; i++) {
+               struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+               if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+                                                       !heap_data->base)
+                       continue;
+
+               if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+                       continue;
+
+               heaps[i] = ion_heap_create(heap_data);
+               if (IS_ERR_OR_NULL(heaps[i])) {
+                       err = heaps[i] ? PTR_ERR(heaps[i]) : -ENOMEM;
+                       goto err;
+               }
+               ion_device_add_heap(idev, heaps[i]);
+       }
+       return 0;
+err:
+       for (i = 0; i < dummy_ion_pdata.nr; i++) {
+               if (!IS_ERR_OR_NULL(heaps[i]))
+                       ion_heap_destroy(heaps[i]);
+       }
+       kfree(heaps);
+
+       if (carveout_ptr) {
+               free_pages_exact(carveout_ptr,
+                               dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+               carveout_ptr = NULL;
+       }
+       if (chunk_ptr) {
+               free_pages_exact(chunk_ptr,
+                               dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+               chunk_ptr = NULL;
+       }
+       return err;
+}
+
+static void __exit ion_dummy_exit(void)
+{
+       int i;
+
+       ion_device_destroy(idev);
+
+       for (i = 0; i < dummy_ion_pdata.nr; i++)
+               ion_heap_destroy(heaps[i]);
+       kfree(heaps);
+
+       if (carveout_ptr) {
+               free_pages_exact(carveout_ptr,
+                               dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+               carveout_ptr = NULL;
+       }
+       if (chunk_ptr) {
+               free_pages_exact(chunk_ptr,
+                               dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+               chunk_ptr = NULL;
+       }
+}
+
+module_init(ion_dummy_init);
+module_exit(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644 (file)
index 0000000..750b76a
--- /dev/null
@@ -0,0 +1,369 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+                         struct ion_buffer *buffer)
+{
+       struct scatterlist *sg;
+       int i, j;
+       void *vaddr;
+       pgprot_t pgprot;
+       struct sg_table *table = buffer->sg_table;
+       int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+       struct page **pages = vmalloc(sizeof(struct page *) * npages);
+       struct page **tmp = pages;
+
+       if (!pages)
+               return NULL;
+
+       if (buffer->flags & ION_FLAG_CACHED)
+               pgprot = PAGE_KERNEL;
+       else
+               pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+               struct page *page = sg_page(sg);
+               BUG_ON(i >= npages);
+               for (j = 0; j < npages_this_entry; j++)
+                       *(tmp++) = page++;
+       }
+       vaddr = vmap(pages, npages, VM_MAP, pgprot);
+       vfree(pages);
+
+       if (vaddr == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+                          struct ion_buffer *buffer)
+{
+       vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+                     struct vm_area_struct *vma)
+{
+       struct sg_table *table = buffer->sg_table;
+       unsigned long addr = vma->vm_start;
+       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+       struct scatterlist *sg;
+       int i;
+       int ret;
+
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               struct page *page = sg_page(sg);
+               unsigned long remainder = vma->vm_end - addr;
+               unsigned long len = sg->length;
+
+               if (offset >= sg->length) {
+                       offset -= sg->length;
+                       continue;
+               } else if (offset) {
+                       page += offset / PAGE_SIZE;
+                       len = sg->length - offset;
+                       offset = 0;
+               }
+               len = min(len, remainder);
+               ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+                               vma->vm_page_prot);
+               if (ret)
+                       return ret;
+               addr += len;
+               if (addr >= vma->vm_end)
+                       return 0;
+       }
+       return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+       void *addr = vm_map_ram(pages, num, -1, pgprot);
+       if (!addr)
+               return -ENOMEM;
+       memset(addr, 0, PAGE_SIZE * num);
+       vm_unmap_ram(addr, num);
+
+       return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+                                               pgprot_t pgprot)
+{
+       int p = 0;
+       int ret = 0;
+       struct sg_page_iter piter;
+       struct page *pages[32];
+
+       for_each_sg_page(sgl, &piter, nents, 0) {
+               pages[p++] = sg_page_iter_page(&piter);
+               if (p == ARRAY_SIZE(pages)) {
+                       ret = ion_heap_clear_pages(pages, p, pgprot);
+                       if (ret)
+                               return ret;
+                       p = 0;
+               }
+       }
+       if (p)
+               ret = ion_heap_clear_pages(pages, p, pgprot);
+
+       return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+       struct sg_table *table = buffer->sg_table;
+       pgprot_t pgprot;
+
+       if (buffer->flags & ION_FLAG_CACHED)
+               pgprot = PAGE_KERNEL;
+       else
+               pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+       return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+       struct scatterlist sg;
+
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+       spin_lock(&heap->free_lock);
+       list_add(&buffer->list, &heap->free_list);
+       heap->free_list_size += buffer->size;
+       spin_unlock(&heap->free_lock);
+       wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+       size_t size;
+
+       spin_lock(&heap->free_lock);
+       size = heap->free_list_size;
+       spin_unlock(&heap->free_lock);
+
+       return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+                               bool skip_pools)
+{
+       struct ion_buffer *buffer;
+       size_t total_drained = 0;
+
+       if (ion_heap_freelist_size(heap) == 0)
+               return 0;
+
+       spin_lock(&heap->free_lock);
+       if (size == 0)
+               size = heap->free_list_size;
+
+       while (!list_empty(&heap->free_list)) {
+               if (total_drained >= size)
+                       break;
+               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+                                         list);
+               list_del(&buffer->list);
+               heap->free_list_size -= buffer->size;
+               if (skip_pools)
+                       buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+               total_drained += buffer->size;
+               spin_unlock(&heap->free_lock);
+               ion_buffer_destroy(buffer);
+               spin_lock(&heap->free_lock);
+       }
+       spin_unlock(&heap->free_lock);
+
+       return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+       return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+       return _ion_heap_freelist_drain(heap, size, true);
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+       struct ion_heap *heap = data;
+
+       while (true) {
+               struct ion_buffer *buffer;
+
+               wait_event_freezable(heap->waitqueue,
+                                    ion_heap_freelist_size(heap) > 0);
+
+               spin_lock(&heap->free_lock);
+               if (list_empty(&heap->free_list)) {
+                       spin_unlock(&heap->free_lock);
+                       continue;
+               }
+               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+                                         list);
+               list_del(&buffer->list);
+               heap->free_list_size -= buffer->size;
+               spin_unlock(&heap->free_lock);
+               ion_buffer_destroy(buffer);
+       }
+
+       return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+       struct sched_param param = { .sched_priority = 0 };
+
+       INIT_LIST_HEAD(&heap->free_list);
+       heap->free_list_size = 0;
+       spin_lock_init(&heap->free_lock);
+       init_waitqueue_head(&heap->waitqueue);
+       heap->task = kthread_run(ion_heap_deferred_free, heap,
+                                "%s", heap->name);
+       if (IS_ERR(heap->task)) {
+               pr_err("%s: creating thread for deferred free failed\n",
+                      __func__);
+               return PTR_RET(heap->task);
+       }
+       sched_setscheduler(heap->task, SCHED_IDLE, &param);
+       return 0;
+}
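+
+/*
+ * Usage sketch (hypothetical heap constructor, not part of this patch):
+ * a heap opts in to deferred freeing by setting the flag and then
+ * initializing the worker thread:
+ *
+ *     heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
+ *     ret = ion_heap_init_deferred_free(heap);
+ *     if (ret)
+ *             return ERR_PTR(ret);
+ *
+ * Buffers queued with ion_heap_freelist_add() are then destroyed by the
+ * kthread created above, off the freeing caller's context.
+ */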
+
+static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+                                            shrinker);
+       int total = 0;
+       int freed = 0;
+       int to_scan = sc->nr_to_scan;
+
+       if (to_scan == 0)
+               goto out;
+
+       /*
+        * shrink the free list first, no point in zeroing the memory if we're
+        * just going to reclaim it. Also, skip any possible page pooling.
+        */
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+                               PAGE_SIZE;
+
+       to_scan -= freed;
+       if (to_scan < 0)
+               to_scan = 0;
+
+out:
+       total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+       if (heap->ops->shrink)
+               total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+       return total;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+       heap->shrinker.shrink = ion_heap_shrink;
+       heap->shrinker.seeks = DEFAULT_SEEKS;
+       heap->shrinker.batch = 0;
+       register_shrinker(&heap->shrinker);
+}
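+
+/*
+ * Usage sketch (assumed core-side setup, not part of this patch): the
+ * shrinker only makes sense once a heap defers frees or implements a
+ * shrink op, e.g.:
+ *
+ *     if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+ *             ion_heap_init_shrinker(heap);
+ */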
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_heap *heap = NULL;
+
+       switch (heap_data->type) {
+       case ION_HEAP_TYPE_SYSTEM_CONTIG:
+               heap = ion_system_contig_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_SYSTEM:
+               heap = ion_system_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_CARVEOUT:
+               heap = ion_carveout_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_CHUNK:
+               heap = ion_chunk_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_DMA:
+               heap = ion_cma_heap_create(heap_data);
+               break;
+       default:
+               pr_err("%s: Invalid heap type %d\n", __func__,
+                      heap_data->type);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (IS_ERR_OR_NULL(heap)) {
+               pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+                      __func__, heap_data->name, heap_data->type,
+                      heap_data->base, heap_data->size);
+               return ERR_PTR(-EINVAL);
+       }
+
+       heap->name = heap_data->name;
+       heap->id = heap_data->id;
+       return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+       if (!heap)
+               return;
+
+       switch (heap->type) {
+       case ION_HEAP_TYPE_SYSTEM_CONTIG:
+               ion_system_contig_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_SYSTEM:
+               ion_system_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_CARVEOUT:
+               ion_carveout_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_CHUNK:
+               ion_chunk_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_DMA:
+               ion_cma_heap_destroy(heap);
+               break;
+       default:
+               pr_err("%s: Invalid heap type %d\n", __func__,
+                      heap->type);
+       }
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644 (file)
index 0000000..0e20e62
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * drivers/gpu/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+       struct page *page;
+       struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+       struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+       if (!page)
+               return NULL;
+       ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+                                               DMA_BIDIRECTIONAL);
+       return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+                                    struct page *page)
+{
+       __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+       struct ion_page_pool_item *item;
+
+       item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+       if (!item)
+               return -ENOMEM;
+
+       mutex_lock(&pool->mutex);
+       item->page = page;
+       if (PageHighMem(page)) {
+               list_add_tail(&item->list, &pool->high_items);
+               pool->high_count++;
+       } else {
+               list_add_tail(&item->list, &pool->low_items);
+               pool->low_count++;
+       }
+       mutex_unlock(&pool->mutex);
+       return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+       struct ion_page_pool_item *item;
+       struct page *page;
+
+       if (high) {
+               BUG_ON(!pool->high_count);
+               item = list_first_entry(&pool->high_items,
+                                       struct ion_page_pool_item, list);
+               pool->high_count--;
+       } else {
+               BUG_ON(!pool->low_count);
+               item = list_first_entry(&pool->low_items,
+                                       struct ion_page_pool_item, list);
+               pool->low_count--;
+       }
+
+       list_del(&item->list);
+       page = item->page;
+       kfree(item);
+       return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+       struct page *page = NULL;
+
+       BUG_ON(!pool);
+
+       mutex_lock(&pool->mutex);
+       if (pool->high_count)
+               page = ion_page_pool_remove(pool, true);
+       else if (pool->low_count)
+               page = ion_page_pool_remove(pool, false);
+       mutex_unlock(&pool->mutex);
+
+       if (!page)
+               page = ion_page_pool_alloc_pages(pool);
+
+       return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+       int ret;
+
+       ret = ion_page_pool_add(pool, page);
+       if (ret)
+               ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+       int count = pool->low_count;
+
+       if (high)
+               count += pool->high_count;
+
+       return count << pool->order;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+                               int nr_to_scan)
+{
+       int i;
+       bool high;
+
+       high = !!(gfp_mask & __GFP_HIGHMEM);
+
+       for (i = 0; i < nr_to_scan; i++) {
+               struct page *page;
+
+               mutex_lock(&pool->mutex);
+               if (pool->low_count) {
+                       page = ion_page_pool_remove(pool, false);
+               } else if (high && pool->high_count) {
+                       page = ion_page_pool_remove(pool, true);
+               } else {
+                       mutex_unlock(&pool->mutex);
+                       break;
+               }
+               mutex_unlock(&pool->mutex);
+               ion_page_pool_free_pages(pool, page);
+       }
+
+       return ion_page_pool_total(pool, high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+       struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+                                            GFP_KERNEL);
+       if (!pool)
+               return NULL;
+       pool->high_count = 0;
+       pool->low_count = 0;
+       INIT_LIST_HEAD(&pool->low_items);
+       INIT_LIST_HEAD(&pool->high_items);
+       pool->gfp_mask = gfp_mask;
+       pool->order = order;
+       mutex_init(&pool->mutex);
+       plist_node_init(&pool->list, order);
+
+       return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+       kfree(pool);
+}
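+
+/*
+ * Usage sketch (hypothetical heap code, not part of this patch): a heap
+ * keeps one pool per page order and recycles pages through it. Draining
+ * via ion_page_pool_shrink() before destroy is assumed here, since
+ * ion_page_pool_destroy() does not free pages still cached in the pool:
+ *
+ *     struct ion_page_pool *pool;
+ *     struct page *page;
+ *
+ *     pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
+ *     page = ion_page_pool_alloc(pool);
+ *     ion_page_pool_free(pool, page);
+ *     ion_page_pool_shrink(pool, GFP_HIGHUSER, 1);
+ *     ion_page_pool_destroy(pool);
+ */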
+
+static int __init ion_page_pool_init(void)
+{
+       return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644 (file)
index 0000000..9bcd077
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:               reference count
+ * @node:              node in the ion_device buffers tree
+ * @dev:               back pointer to the ion_device
+ * @heap:              back pointer to the heap the buffer came from
+ * @flags:             buffer specific flags
+ * @private_flags:     internal buffer specific flags
+ * @size:              size of the buffer
+ * @priv_virt:         private data to the buffer representable as
+ *                     a void *
+ * @priv_phys:         private data to the buffer representable as
+ *                     an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:              protects the buffers cnt fields
+ * @kmap_cnt:          number of times the buffer is mapped to the kernel
+ * @vaddr:             the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:          number of times the buffer is mapped for dma
+ * @sg_table:          the sg table for the buffer if dmap_cnt is not zero
+ * @pages:             flat array of pages in the buffer -- used by fault
+ *                     handler and only valid for buffers that are faulted in
+ * @vmas:              list of vma's mapping this buffer
+ * @handle_count:      count of handles referencing this buffer
+ * @task_comm:         taskcomm of last client to reference this buffer in a
+ *                     handle, used for debugging
+ * @pid:               pid of last client to reference this buffer in a
+ *                     handle, used for debugging
+ */
+struct ion_buffer {
+       struct kref ref;
+       union {
+               struct rb_node node;
+               struct list_head list;
+       };
+       struct ion_device *dev;
+       struct ion_heap *heap;
+       unsigned long flags;
+       unsigned long private_flags;
+       size_t size;
+       union {
+               void *priv_virt;
+               ion_phys_addr_t priv_phys;
+       };
+       struct mutex lock;
+       int kmap_cnt;
+       void *vaddr;
+       int dmap_cnt;
+       struct sg_table *sg_table;
+       struct page **pages;
+       struct list_head vmas;
+       /* used to track orphaned buffers */
+       int handle_count;
+       char task_comm[TASK_COMM_LEN];
+       pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:          allocate memory
+ * @free:              free memory
+ * @phys:              get physical address of a buffer (only defined on
+ *                     physically contiguous heaps)
+ * @map_dma:           map the memory for dma to a scatterlist
+ * @unmap_dma:         unmap the memory for dma
+ * @map_kernel:                map memory to the kernel
+ * @unmap_kernel:      unmap memory from the kernel
+ * @map_user:          map memory to userspace
+ * @shrink:            reclaim heap-cached pages, called via the shrinker
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+       int (*allocate) (struct ion_heap *heap,
+                        struct ion_buffer *buffer, unsigned long len,
+                        unsigned long align, unsigned long flags);
+       void (*free) (struct ion_buffer *buffer);
+       int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+                    ion_phys_addr_t *addr, size_t *len);
+       struct sg_table *(*map_dma) (struct ion_heap *heap,
+                                       struct ion_buffer *buffer);
+       void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+       void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+       void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+       int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+                        struct vm_area_struct *vma);
+       int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:              rb node to put the heap on the device's tree of heaps
+ * @dev:               back pointer to the ion_device
+ * @type:              type of heap
+ * @ops:               ops struct as above
+ * @flags:             flags
+ * @id:                        id of heap, also indicates priority of this heap when
+ *                     allocating.  These are specified by platform data and
+ *                     MUST be unique
+ * @name:              used for debugging
+ * @shrinker:          a shrinker for the heap
+ * @free_list:         free list head if deferred free is used
+ * @free_list_size:    size of the deferred free list in bytes
+ * @lock:              protects the free list
+ * @waitqueue:         queue to wait on from deferred free thread
+ * @task:              task struct of deferred free thread
+ * @debug_show:                called when heap debug file is read to add any
+ *                     heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+       struct plist_node node;
+       struct ion_device *dev;
+       enum ion_heap_type type;
+       struct ion_heap_ops *ops;
+       unsigned long flags;
+       unsigned int id;
+       const char *name;
+       struct shrinker shrinker;
+       struct list_head free_list;
+       size_t free_list_size;
+       spinlock_t free_lock;
+       wait_queue_head_t waitqueue;
+       struct task_struct *task;
+       int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:            buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:            buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:      arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR() encoded error
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+                                    (struct ion_client *client,
+                                     unsigned int cmd,
+                                     unsigned long arg));
+
+/**
+ * ion_device_destroy - free a device and its resources
+ * @dev:               the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:               the device
+ * @heap:              the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+                       struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:              the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:              the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:              the heap
+ * @buffer:            the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:              the heap
+ * @size:              amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *                             list, skipping any heap-specific
+ *                             pooling or caching mechanisms
+ *
+ * @heap:              the heap
+ * @size:              amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+                                       size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:              the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+                                     unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+                      unsigned long size);
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this value is used to indicate allocation failure
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
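+
+/*
+ * Sketch (assumed architecture-specific heap code): heap, size and align
+ * are hypothetical; the point is that failure is tested against the
+ * sentinel rather than against zero:
+ *
+ *     ion_phys_addr_t paddr = ion_carveout_allocate(heap, size, align);
+ *     if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
+ *             return -ENOMEM;
+ */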
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:                number of highmem items in the pool
+ * @low_count:         number of lowmem items in the pool
+ * @high_items:                list of highmem items
+ * @low_items:         list of lowmem items
+ * @mutex:             lock protecting this struct and especially the count
+ *                     item list
+ * @gfp_mask:          gfp_mask to use from alloc
+ * @order:             order of pages in the pool
+ * @list:              plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+struct ion_page_pool {
+       int high_count;
+       int low_count;
+       struct list_head high_items;
+       struct list_head low_items;
+       struct mutex mutex;
+       gfp_t gfp_mask;
+       unsigned int order;
+       struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/**
+ * ion_page_pool_shrink - shrinks the amount of memory cached in the pool
+ * @pool:              the pool
+ * @gfp_mask:          the memory type to reclaim
+ * @nr_to_scan:                number of pool items to free
+ *
+ * returns the number of pages remaining in the pool
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+                         int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:               the device the pages will be used with
+ * @page:              the first page to be flushed
+ * @size:              size in bytes of region to be flushed
+ * @dir:               direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644 (file)
index 0000000..a052418
--- /dev/null
@@ -0,0 +1,446 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+                                    __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
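+
+/*
+ * Allocation orders tried from largest to smallest: with 4 KiB pages,
+ * order 8, 4 and 0 correspond to 1 MiB, 64 KiB and 4 KiB chunks.
+ */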
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+       int i;
+       for (i = 0; i < num_orders; i++)
+               if (order == orders[i])
+                       return i;
+       BUG();
+       return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+       return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+       struct ion_heap heap;
+       struct ion_page_pool **pools;
+};
+
+struct page_info {
+       struct page *page;
+       unsigned int order;
+       struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long order)
+{
+       bool cached = ion_buffer_cached(buffer);
+       struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+       struct page *page;
+
+       if (!cached) {
+               page = ion_page_pool_alloc(pool);
+       } else {
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (order > 4)
+                       gfp_flags = high_order_gfp_flags;
+               page = alloc_pages(gfp_flags, order);
+               if (!page)
+                       return NULL;
+               ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+                                               DMA_BIDIRECTIONAL);
+       }
+       return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+                            struct ion_buffer *buffer, struct page *page,
+                            unsigned int order)
+{
+       bool cached = ion_buffer_cached(buffer);
+
+       if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
+               struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+               ion_page_pool_free(pool, page);
+       } else {
+               __free_pages(page, order);
+       }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+                                                struct ion_buffer *buffer,
+                                                unsigned long size,
+                                                unsigned int max_order)
+{
+       struct page *page;
+       struct page_info *info;
+       int i;
+
+       info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+       if (!info)
+               return NULL;
+
+       for (i = 0; i < num_orders; i++) {
+               if (size < order_to_size(orders[i]))
+                       continue;
+               if (max_order < orders[i])
+                       continue;
+
+               page = alloc_buffer_page(heap, buffer, orders[i]);
+               if (!page)
+                       continue;
+
+               info->page = page;
+               info->order = orders[i];
+               INIT_LIST_HEAD(&info->list);
+               return info;
+       }
+       kfree(info);
+
+       return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+                                    struct ion_buffer *buffer,
+                                    unsigned long size, unsigned long align,
+                                    unsigned long flags)
+{
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int ret;
+       struct list_head pages;
+       struct page_info *info, *tmp_info;
+       int i = 0;
+       unsigned long size_remaining = PAGE_ALIGN(size);
+       unsigned int max_order = orders[0];
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       if (size / PAGE_SIZE > totalram_pages / 2)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&pages);
+       while (size_remaining > 0) {
+               info = alloc_largest_available(sys_heap, buffer, size_remaining,
+                                               max_order);
+               if (!info)
+                       goto err;
+               list_add_tail(&info->list, &pages);
+               size_remaining -= (1 << info->order) * PAGE_SIZE;
+               max_order = info->order;
+               i++;
+       }
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               goto err;
+
+       ret = sg_alloc_table(table, i, GFP_KERNEL);
+       if (ret)
+               goto err1;
+
+       sg = table->sgl;
+       list_for_each_entry_safe(info, tmp_info, &pages, list) {
+               struct page *page = info->page;
+               sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+               sg = sg_next(sg);
+               list_del(&info->list);
+               kfree(info);
+       }
+
+       buffer->priv_virt = table;
+       return 0;
+err1:
+       kfree(table);
+err:
+       list_for_each_entry_safe(info, tmp_info, &pages, list) {
+               free_buffer_page(sys_heap, buffer, info->page, info->order);
+               kfree(info);
+       }
+       return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       struct sg_table *table = buffer->sg_table;
+       bool cached = ion_buffer_cached(buffer);
+       struct scatterlist *sg;
+       LIST_HEAD(pages);
+       int i;
+
+       /*
+        * uncached pages come from the page pools, zero them before
+        * returning for security purposes (other allocations are zeroed
+        * at alloc time)
+        */
+       if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+               ion_heap_buffer_zero(buffer);
+
+       for_each_sg(table->sgl, sg, table->nents, i)
+               free_buffer_page(sys_heap, buffer, sg_page(sg),
+                               get_order(sg->length));
+       sg_free_table(table);
+       kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+                                               struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+                                     struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+                                       int nr_to_scan)
+{
+       struct ion_system_heap *sys_heap;
+       int nr_total = 0;
+       int i;
+
+       sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+       }
+
+       return nr_total;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+       .allocate = ion_system_heap_allocate,
+       .free = ion_system_heap_free,
+       .map_dma = ion_system_heap_map_dma,
+       .unmap_dma = ion_system_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+       .shrink = ion_system_heap_shrink,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+                                     void *unused)
+{
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+                          pool->high_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->high_count);
+               seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+                          pool->low_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->low_count);
+       }
+       return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+       struct ion_system_heap *heap;
+       int i;
+
+       heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+       if (!heap)
+               return ERR_PTR(-ENOMEM);
+       heap->heap.ops = &system_heap_ops;
+       heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+                             GFP_KERNEL);
+       if (!heap->pools)
+               goto err_alloc_pools;
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool;
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+               pool = ion_page_pool_create(gfp_flags, orders[i]);
+               if (!pool)
+                       goto err_create_pool;
+               heap->pools[i] = pool;
+       }
+
+       heap->heap.debug_show = ion_system_heap_debug_show;
+       return &heap->heap;
+err_create_pool:
+       for (i = 0; i < num_orders; i++)
+               if (heap->pools[i])
+                       ion_page_pool_destroy(heap->pools[i]);
+       kfree(heap->pools);
+err_alloc_pools:
+       kfree(heap);
+       return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+
+       for (i = 0; i < num_orders; i++)
+               ion_page_pool_destroy(sys_heap->pools[i]);
+       kfree(sys_heap->pools);
+       kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+                                          struct ion_buffer *buffer,
+                                          unsigned long len,
+                                          unsigned long align,
+                                          unsigned long flags)
+{
+       int order = get_order(len);
+       struct page *page;
+       struct sg_table *table;
+       unsigned long i;
+       int ret;
+
+       if (align > (PAGE_SIZE << order))
+               return -EINVAL;
+
+       page = alloc_pages(low_order_gfp_flags, order);
+       if (!page)
+               return -ENOMEM;
+
+       split_page(page, order);
+
+       len = PAGE_ALIGN(len);
+       for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+               __free_page(page + i);
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto out;
+
+       sg_set_page(table->sgl, page, len, 0);
+
+       buffer->priv_virt = table;
+
+       ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+       return 0;
+
+out:
+       for (i = 0; i < len >> PAGE_SHIFT; i++)
+               __free_page(page + i);
+       kfree(table);
+       return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+       unsigned long i;
+
+       for (i = 0; i < pages; i++)
+               __free_page(page + i);
+       sg_free_table(table);
+       kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+                                      struct ion_buffer *buffer,
+                                      ion_phys_addr_t *addr, size_t *len)
+{
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       *addr = page_to_phys(page);
+       *len = buffer->size;
+       return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+                                               struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+                                            struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+       .allocate = ion_system_contig_heap_allocate,
+       .free = ion_system_contig_heap_free,
+       .phys = ion_system_contig_heap_phys,
+       .map_dma = ion_system_contig_heap_map_dma,
+       .unmap_dma = ion_system_contig_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+       struct ion_heap *heap;
+
+       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+       if (!heap)
+               return ERR_PTR(-ENOMEM);
+       heap->ops = &kmalloc_ops;
+       heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+       return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+       kfree(heap);
+}
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644 (file)
index 0000000..654acb5
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+       struct miscdevice misc;
+};
+
+struct ion_test_data {
+       struct dma_buf *dma_buf;
+       struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+               void __user *ptr, size_t offset, size_t size, bool write)
+{
+       int ret = 0;
+       struct dma_buf_attachment *attach;
+       struct sg_table *table;
+       pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+       enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       struct sg_page_iter sg_iter;
+       unsigned long offset_page;
+
+       attach = dma_buf_attach(dma_buf, dev);
+       if (IS_ERR(attach))
+               return PTR_ERR(attach);
+
+       table = dma_buf_map_attachment(attach, dir);
+       if (IS_ERR(table)) {
+               dma_buf_detach(dma_buf, attach);
+               return PTR_ERR(table);
+       }
+
+       offset_page = offset >> PAGE_SHIFT;
+       offset %= PAGE_SIZE;
+
+       for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+               struct page *page = sg_page_iter_page(&sg_iter);
+               void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+               size_t to_copy = PAGE_SIZE - offset;
+
+               to_copy = min(to_copy, size);
+               if (!vaddr) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               if (write)
+                       ret = copy_from_user(vaddr + offset, ptr, to_copy);
+               else
+                       ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+               vunmap(vaddr);
+               if (ret) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+               size -= to_copy;
+               if (!size)
+                       break;
+               ptr += to_copy;
+               offset = 0;
+       }
+
+err:
+       dma_buf_unmap_attachment(attach, table, dir);
+       dma_buf_detach(dma_buf, attach);
+       return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+               size_t offset, size_t size, bool write)
+{
+       int ret;
+       unsigned long page_offset = offset >> PAGE_SHIFT;
+       size_t copy_offset = offset % PAGE_SIZE;
+       size_t copy_size = size;
+       enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+       if (offset > dma_buf->size || size > dma_buf->size - offset)
+               return -EINVAL;
+
+       ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+       if (ret)
+               return ret;
+
+       while (copy_size > 0) {
+               size_t to_copy;
+               void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+               if (!vaddr) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+               if (write)
+                       ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+               else
+                       ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+               dma_buf_kunmap(dma_buf, page_offset, vaddr);
+               if (ret) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               copy_size -= to_copy;
+               ptr += to_copy;
+               page_offset++;
+               copy_offset = 0;
+       }
+err:
+       dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+       return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+                                               unsigned long arg)
+{
+       struct ion_test_data *test_data = filp->private_data;
+       int ret = 0;
+
+       union {
+               struct ion_test_rw_data test_rw;
+       } data;
+
+       if (_IOC_SIZE(cmd) > sizeof(data))
+               return -EINVAL;
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE)
+               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+
+       switch (cmd) {
+       case ION_IOC_TEST_SET_FD:
+       {
+               struct dma_buf *dma_buf = NULL;
+               int fd = arg;
+
+               if (fd >= 0) {
+                       dma_buf = dma_buf_get((int)arg);
+                       if (IS_ERR(dma_buf))
+                               return PTR_ERR(dma_buf);
+               }
+               if (test_data->dma_buf)
+                       dma_buf_put(test_data->dma_buf);
+               test_data->dma_buf = dma_buf;
+               break;
+       }
+       case ION_IOC_TEST_DMA_MAPPING:
+       {
+               ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+                                       u64_to_uptr(data.test_rw.ptr),
+                                       data.test_rw.offset, data.test_rw.size,
+                                       data.test_rw.write);
+               break;
+       }
+       case ION_IOC_TEST_KERNEL_MAPPING:
+       {
+               ret = ion_handle_test_kernel(test_data->dma_buf,
+                                       u64_to_uptr(data.test_rw.ptr),
+                                       data.test_rw.offset, data.test_rw.size,
+                                       data.test_rw.write);
+               break;
+       }
+       default:
+               return -ENOTTY;
+       }
+
+       if (_IOC_DIR(cmd) & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+                       return -EFAULT;
+       }
+       return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+       struct ion_test_data *data;
+       struct miscdevice *miscdev = file->private_data;
+
+       data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->dev = miscdev->parent;
+
+       file->private_data = data;
+
+       return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+       struct ion_test_data *data = file->private_data;
+
+       kfree(data);
+
+       return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = ion_test_ioctl,
+       .compat_ioctl = ion_test_ioctl,
+       .open = ion_test_open,
+       .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct ion_test_device *testdev;
+
+       testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+                               GFP_KERNEL);
+       if (!testdev)
+               return -ENOMEM;
+
+       testdev->misc.minor = MISC_DYNAMIC_MINOR;
+       testdev->misc.name = "ion-test";
+       testdev->misc.fops = &ion_test_fops;
+       testdev->misc.parent = &pdev->dev;
+       ret = misc_register(&testdev->misc);
+       if (ret) {
+               pr_err("failed to register misc device.\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, testdev);
+
+       return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+       .driver = {
+               .name = "ion-test",
+       },
+};
+
+static int __init ion_test_init(void)
+{
+       platform_device_register_simple("ion-test", -1, NULL, 0);
+       return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+       platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
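
A hedged userspace sketch of driving this test device: attach an existing dma-buf fd with ION_IOC_TEST_SET_FD, then copy data through the kernel mapping path exercised by ion_handle_test_kernel(). The /dev/ion-test node name follows from the misc device registered above but depends on the target's dev setup; error handling is abbreviated.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"           /* the uapi header added by this patch */

/* Write "size" bytes from "data" into an already-exported dma-buf. */
int ion_test_write(int dmabuf_fd, const void *data, uint64_t size)
{
        struct ion_test_rw_data rw;
        int fd = open("/dev/ion-test", O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, ION_IOC_TEST_SET_FD, dmabuf_fd) < 0)
                goto fail;

        memset(&rw, 0, sizeof(rw));
        rw.ptr = (uintptr_t)data;       /* user buffer to copy from */
        rw.offset = 0;
        rw.size = size;
        rw.write = 1;                   /* 1 = write into the buffer */
        if (ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw) < 0)
                goto fail;

        ioctl(fd, ION_IOC_TEST_SET_FD, -1);     /* drop the dma-buf */
        close(fd);
        return 0;
fail:
        close(fd);
        return -1;
}
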
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644 (file)
index 0000000..11cd003
--- /dev/null
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644 (file)
index 0000000..3474c65
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+       struct ion_platform_data *pdata = pdev->dev.platform_data;
+       int err;
+       int i;
+
+       num_heaps = pdata->nr;
+
+       heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+       if (!heaps)
+               return -ENOMEM;
+
+       idev = ion_device_create(NULL);
+       if (IS_ERR_OR_NULL(idev)) {
+               kfree(heaps);
+               return PTR_ERR(idev);
+       }
+
+       /* create the heaps as specified in the board file */
+       for (i = 0; i < num_heaps; i++) {
+               struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+               heaps[i] = ion_heap_create(heap_data);
+               if (IS_ERR_OR_NULL(heaps[i])) {
+                       err = PTR_ERR(heaps[i]);
+                       goto err;
+               }
+               ion_device_add_heap(idev, heaps[i]);
+       }
+       platform_set_drvdata(pdev, idev);
+       return 0;
+err:
+       for (i = 0; i < num_heaps; i++) {
+               if (heaps[i])
+                       ion_heap_destroy(heaps[i]);
+       }
+       kfree(heaps);
+       return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+       struct ion_device *idev = platform_get_drvdata(pdev);
+       int i;
+
+       ion_device_destroy(idev);
+       for (i = 0; i < num_heaps; i++)
+               ion_heap_destroy(heaps[i]);
+       kfree(heaps);
+       return 0;
+}
+
+static struct platform_driver ion_driver = {
+       .probe = tegra_ion_probe,
+       .remove = tegra_ion_remove,
+       .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
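
The probe above consumes ion_platform_data supplied by the board file. A hedged sketch of what such a board definition might look like; the heap ids, carveout base, and size are invented for illustration and are not part of this patch:

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "../ion.h"

static struct ion_platform_heap example_heaps[] = {
        {
                .type = ION_HEAP_TYPE_SYSTEM,
                .id   = 0,
                .name = "system",
        },
        {
                .type = ION_HEAP_TYPE_CARVEOUT,
                .id   = 1,
                .name = "carveout",
                .base = 0x80000000,     /* assumed reserved region */
                .size = SZ_8M,
        },
};

static struct ion_platform_data example_ion_pdata = {
        .nr    = ARRAY_SIZE(example_heaps),
        .heaps = example_heaps,
};

static struct platform_device example_ion_device = {
        .name = "ion-tegra",    /* matches ion_driver above */
        .id   = -1,
        .dev  = { .platform_data = &example_ion_pdata },
};
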
index fe74494868ef33037e7e43602471b51780f6c37d..a56e0894f66871752d7e9df73653edbc87184b8a 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/sched.h>
 #include <linux/swap.h>
 #include <linux/rcupdate.h>
-#include <linux/profile.h>
 #include <linux/notifier.h>
 
 static uint32_t lowmem_debug_level = 1;
@@ -74,6 +73,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
        int tasksize;
        int i;
        short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+       int minfree = 0;
        int selected_tasksize = 0;
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
@@ -86,8 +86,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
        if (lowmem_minfree_size < array_size)
                array_size = lowmem_minfree_size;
        for (i = 0; i < array_size; i++) {
-               if (other_free < lowmem_minfree[i] &&
-                   other_file < lowmem_minfree[i]) {
+               minfree = lowmem_minfree[i];
+               if (other_free < minfree && other_file < minfree) {
                        min_score_adj = lowmem_adj[i];
                        break;
                }
@@ -144,13 +144,22 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
                selected = p;
                selected_tasksize = tasksize;
                selected_oom_score_adj = oom_score_adj;
-               lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
-                            p->pid, p->comm, oom_score_adj, tasksize);
+               lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+                            p->comm, p->pid, oom_score_adj, tasksize);
        }
        if (selected) {
-               lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
-                            selected->pid, selected->comm,
-                            selected_oom_score_adj, selected_tasksize);
+               lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+                               "   to free %ldkB on behalf of '%s' (%d) because\n" \
+                               "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+                               "   Free memory is %ldkB above reserved\n",
+                            selected->comm, selected->pid,
+                            selected_oom_score_adj,
+                            selected_tasksize * (long)(PAGE_SIZE / 1024),
+                            current->comm, current->pid,
+                            other_file * (long)(PAGE_SIZE / 1024),
+                            minfree * (long)(PAGE_SIZE / 1024),
+                            min_score_adj,
+                            other_free * (long)(PAGE_SIZE / 1024));
                lowmem_deathpending_timeout = jiffies + HZ;
                send_sig(SIGKILL, selected, 0);
                set_tsk_thread_flag(selected, TIF_MEMDIE);
@@ -178,9 +187,94 @@ static void __exit lowmem_exit(void)
        unregister_shrinker(&lowmem_shrinker);
 }
 
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+       if (oom_adj == OOM_ADJUST_MAX)
+               return OOM_SCORE_ADJ_MAX;
+       else
+               return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+       int i;
+       short oom_adj;
+       short oom_score_adj;
+       int array_size = ARRAY_SIZE(lowmem_adj);
+
+       if (lowmem_adj_size < array_size)
+               array_size = lowmem_adj_size;
+
+       if (array_size <= 0)
+               return;
+
+       oom_adj = lowmem_adj[array_size - 1];
+       if (oom_adj > OOM_ADJUST_MAX)
+               return;
+
+       oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+       if (oom_score_adj <= OOM_ADJUST_MAX)
+               return;
+
+       lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+       for (i = 0; i < array_size; i++) {
+               oom_adj = lowmem_adj[i];
+               oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+               lowmem_adj[i] = oom_score_adj;
+               lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+                            oom_adj, oom_score_adj);
+       }
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+       int ret;
+
+       ret = param_array_ops.set(val, kp);
+
+       /* HACK: Autodetect oom_adj values in lowmem_adj array */
+       lowmem_autodetect_oom_adj_values();
+
+       return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+       return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+       param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+       .set = lowmem_adj_array_set,
+       .get = lowmem_adj_array_get,
+       .free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+       .max = ARRAY_SIZE(lowmem_adj),
+       .num = &lowmem_adj_size,
+       .ops = &param_ops_short,
+       .elemsize = sizeof(lowmem_adj[0]),
+       .elem = lowmem_adj,
+};
+#endif
+
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+__module_param_call(MODULE_PARAM_PREFIX, adj,
+                   &lowmem_adj_array_ops,
+                   .arr = &__param_arr_adj,
+                   S_IRUGO | S_IWUSR, -1);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
 module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
                         S_IRUGO | S_IWUSR);
+#endif
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
                         S_IRUGO | S_IWUSR);
 module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
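
The autodetect path above exists because userspace may still write legacy oom_adj values (-17..15) into the adj parameter while the kernel now works in oom_score_adj units (-1000..1000). A standalone sketch of the same arithmetic, with the <linux/oom.h> constants inlined for illustration:

#include <stdio.h>

#define OOM_ADJUST_MAX     15
#define OOM_DISABLE       (-17)
#define OOM_SCORE_ADJ_MAX  1000

static short oom_adj_to_oom_score_adj(short oom_adj)
{
        if (oom_adj == OOM_ADJUST_MAX)
                return OOM_SCORE_ADJ_MAX;
        return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

int main(void)
{
        short legacy[] = { 0, 1, 6, 12, 15 };   /* a typical lowmem_adj set */
        int i;

        for (i = 0; i < 5; i++)
                printf("oom_adj %d => oom_score_adj %d\n",
                       legacy[i], oom_adj_to_oom_score_adj(legacy[i]));
        /* prints 0, 58, 352, 705, 1000 */
        return 0;
}
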
index 585040be5f1828916c648f46670b6be182ed2812..1a50669ec8a945de62a4602fb9ac91b3bf8fc431 100644 (file)
 #define _LINUX_SW_SYNC_H
 
 #include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
 #include "sync.h"
+#include "uapi/sw_sync.h"
 
 struct sw_sync_timeline {
        struct  sync_timeline   obj;
@@ -35,24 +34,26 @@ struct sw_sync_pt {
        u32                     value;
 };
 
+#if IS_ENABLED(CONFIG_SW_SYNC)
 struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
 void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
 
 struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
-       __u32   value;
-       char    name[32];
-       __s32   fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC      'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
-               struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+       return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+               u32 value)
+{
+       return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
 #endif /* _LINUX_SW_SYNC_H */
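
With the IS_ENABLED() stubs above, callers can be compiled unconditionally and only need a NULL check. A hedged kernel-side sketch (the timeline name and the teardown via sync_timeline_destroy() on the embedded obj are illustrative):

#include <linux/errno.h>
#include "sw_sync.h"

static int example_sw_sync(void)
{
        struct sw_sync_timeline *tl;
        struct sync_pt *pt;

        tl = sw_sync_timeline_create("example");
        if (!tl)                        /* OOM, or CONFIG_SW_SYNC disabled */
                return -ENODEV;

        pt = sw_sync_pt_create(tl, 1);  /* signals once the count reaches 1 */
        if (!pt) {
                sync_timeline_destroy(&tl->obj);
                return -ENOMEM;
        }

        sw_sync_timeline_inc(tl, 1);    /* advance the count; pt now signals */
        return 0;
}
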
index 3893a35747698603cce0fe62ee7dcc1d1c333352..d38305b409306a95046023ba77af7bb7f3f7e5bf 100644 (file)
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;
 
-       if (obj->ops->release_obj)
-               obj->ops->release_obj(obj);
-
        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
 
+       if (obj->ops->release_obj)
+               obj->ops->release_obj(obj);
+
        kfree(obj);
 }
 
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
        obj->destroyed = true;
+       smp_wmb();
 
        /*
-        * If this is not the last reference, signal any children
-        * that their parent is going away.
+        * signal any children that their parent is going away.
         */
+       sync_timeline_signal(obj);
 
-       if (!kref_put(&obj->kref, sync_timeline_free))
-               sync_timeline_signal(obj);
+       kref_put(&obj->kref, sync_timeline_free);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
 
index 38ea986dc70f84dc72c38918f3a3b07d26e75ff4..75da9e85ac69762779200c10bdf3d766fdebd712 100644 (file)
 #define _LINUX_SYNC_H
 
 #include <linux/types.h>
-#ifdef __KERNEL__
-
 #include <linux/kref.h>
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
+#include "uapi/sync.h"
+
 struct sync_timeline;
 struct sync_pt;
 struct sync_fence;
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
  */
 int sync_fence_wait(struct sync_fence *fence, long timeout);
 
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2:       file descriptor of second fence
- * @name:      name of new fence
- * @fence:     returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
-       __s32   fd2; /* fd of second fence */
-       char    name[32]; /* name of new fence */
-       __s32   fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len:               length of sync_pt_info including any driver_data
- * @obj_name:          name of parent sync_timeline
- * @driver_name:       name of driver implmenting the parent
- * @status:            status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns:      timestamp of status change in nanoseconds
- * @driver_data:       any driver dependant data
- */
-struct sync_pt_info {
-       __u32   len;
-       char    obj_name[32];
-       char    driver_name[32];
-       __s32   status;
-       __u64   timestamp_ns;
-
-       __u8    driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len:       ioctl caller writes the size of the buffer its passing in.
- *             ioctl returns length of sync_fence_data reutnred to userspace
- *             including pt_info.
- * @name:      name of fence
- * @status:    status of fence. 1: signaled 0:active <0:error
- * @pt_info:   a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
-       __u32   len;
-       char    name[32];
-       __s32   status;
-
-       __u8    pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC         '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds.  Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT          _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data.  Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO    _IOWR(SYNC_IOC_MAGIC, 2,\
-       struct sync_fence_info_data)
-
 #endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644 (file)
index 0000000..aa013f6
--- /dev/null
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+       /* return code bit numbers or set alarm arg */
+       ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME,
+
+       ANDROID_ALARM_TYPE_COUNT,
+
+       /* return code bit numbers */
+       /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+       ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+       ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT                  _IO('a', 1)
+
+#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
+
+#endif
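
A hedged userspace sketch of the ioctl flow this header defines: arm an elapsed-realtime wakeup alarm relative to now, then block in ANDROID_ALARM_WAIT, which per the return-flags enum above reports which alarms fired. The /dev/alarm node name is an assumption about the surrounding Android userspace, not something this header declares.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"

int wait_for_alarm(time_t seconds_from_now)
{
        struct timespec ts;
        int ret, fd = open("/dev/alarm", O_RDWR);

        if (fd < 0)
                return -1;

        /* ELAPSED_REALTIME counts from boot, including time spent suspended */
        clock_gettime(CLOCK_BOOTTIME, &ts);
        ts.tv_sec += seconds_from_now;

        ret = ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP),
                    &ts);
        if (ret >= 0)
                ret = ioctl(fd, ANDROID_ALARM_WAIT);    /* fired-alarm mask */

        close(fd);
        return ret;
}
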
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644 (file)
index 0000000..ba4743c
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN                256
+
+#define ASHMEM_NAME_DEF                "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED      0
+#define ASHMEM_WAS_PURGED      1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED     0
+#define ASHMEM_IS_PINNED       1
+
+struct ashmem_pin {
+       __u32 offset;   /* offset into region, in bytes, page-aligned */
+       __u32 len;      /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC            0x77
+
+#define ASHMEM_SET_NAME                _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME                _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE                _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE                _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK   _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK   _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN             _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN           _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS  _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES        _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
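
A hedged userspace sketch tying these ioctls together: name and size an ashmem region, map it, and pin the first page so it survives memory pressure. Error handling is abbreviated.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h"

void *create_ashmem_region(const char *name, size_t size, int *out_fd)
{
        void *addr;
        struct ashmem_pin pin = { .offset = 0, .len = 4096 };
        int fd = open("/dev/ashmem", O_RDWR);

        if (fd < 0)
                return NULL;

        ioctl(fd, ASHMEM_SET_NAME, name);  /* at most ASHMEM_NAME_LEN bytes */
        ioctl(fd, ASHMEM_SET_SIZE, size);  /* must precede the mmap */

        addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                close(fd);
                return NULL;
        }

        ioctl(fd, ASHMEM_PIN, &pin);    /* keep the first page from purging */
        *out_fd = fd;
        return addr;
}
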
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644 (file)
index 0000000..4098c50
--- /dev/null
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' buffer supplied as part of a binder
+ * transaction contains offsets into the data where these structures occur.
+ * The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+       /* 8 bytes for large_flat_header. */
+       __u32   type;
+       __u32   flags;
+
+       /* 8 bytes of data. */
+       union {
+               binder_uintptr_t        binder; /* local object */
+               __u32                   handle; /* remote object */
+       };
+
+       /* extra data associated with local object */
+       binder_uintptr_t        cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+       binder_size_t           write_size;     /* bytes to write */
+       binder_size_t           write_consumed; /* bytes consumed by driver */
+       binder_uintptr_t        write_buffer;
+       binder_size_t           read_size;      /* bytes to read */
+       binder_size_t           read_consumed;  /* bytes consumed by driver */
+       binder_uintptr_t        read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+       /* driver protocol version -- increment with incompatible change */
+       __s32   protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
+#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
+#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, __u32)
+#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
+#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
+#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
+#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
+       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
+       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
+       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+        * identifying the target and contents of the transaction.
+        */
+       union {
+               __u32   handle; /* target descriptor of command transaction */
+               binder_uintptr_t ptr;   /* target descriptor of return transaction */
+       } target;
+       binder_uintptr_t        cookie; /* target object cookie */
+       __u32           code;           /* transaction command */
+
+       /* General information about the transaction. */
+       __u32           flags;
+       pid_t           sender_pid;
+       uid_t           sender_euid;
+       binder_size_t   data_size;      /* number of bytes of data */
+       binder_size_t   offsets_size;   /* number of bytes of offsets */
+
+       /* If this transaction is inline, the data immediately
+        * follows here; otherwise, it ends with a pointer to
+        * the data buffer.
+        */
+       union {
+               struct {
+                       /* transaction data */
+                       binder_uintptr_t        buffer;
+                       /* offsets from buffer to flat_binder_object structs */
+                       binder_uintptr_t        offsets;
+               } ptr;
+               __u8    buf[8];
+       } data;
+};
+
+struct binder_ptr_cookie {
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+       __u32 handle;
+       binder_uintptr_t cookie;
+} __attribute__((packed));
+
+struct binder_pri_desc {
+       __s32 priority;
+       __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+       __s32 priority;
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+       BR_ERROR = _IOR('r', 0, __s32),
+       /*
+        * int: error code
+        */
+
+       BR_OK = _IO('r', 1),
+       /* No parameters! */
+
+       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the received command.
+        */
+
+       BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+       /*
+        * not currently supported
+        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+        * Else the remote object has acquired a primary reference.
+        */
+
+       BR_DEAD_REPLY = _IO('r', 5),
+       /*
+        * The target of the last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+        */
+
+       BR_TRANSACTION_COMPLETE = _IO('r', 6),
+       /*
+        * No parameters... always refers to the last transaction requested
+        * (including replies).  Note that this will be sent even for
+        * asynchronous transactions.
+        */
+
+       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+       /*
+        * void *:      ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+       /*
+        * not currently supported
+        * int: priority
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_NOOP = _IO('r', 12),
+       /*
+        * No parameters.  Do nothing and examine the next command.  It exists
+        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+        */
+
+       BR_SPAWN_LOOPER = _IO('r', 13),
+       /*
+        * No parameters.  The driver has determined that a process has no
+        * threads waiting to service incoming transactions.  When a process
+        * receives this command, it must spawn a new service thread and
+        * register it via bcENTER_LOOPER.
+        */
+
+       BR_FINISHED = _IO('r', 14),
+       /*
+        * not currently supported
+        * stop threadpool thread
+        */
+
+       BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+
+       BR_FAILED_REPLY = _IO('r', 17),
+       /*
+        * The last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+        */
+};
+
+enum binder_driver_command_protocol {
+       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the sent command.
+        */
+
+       BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+       /*
+        * not currently supported
+        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+        * Else you have acquired a primary reference on the object.
+        */
+
+       BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+       /*
+        * void *: ptr to transaction data received on a read
+        */
+
+       BC_INCREFS = _IOW('c', 4, __u32),
+       BC_ACQUIRE = _IOW('c', 5, __u32),
+       BC_RELEASE = _IOW('c', 6, __u32),
+       BC_DECREFS = _IOW('c', 7, __u32),
+       /*
+        * int: descriptor
+        */
+
+       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+       /*
+        * not currently supported
+        * int: priority
+        * int: descriptor
+        */
+
+       BC_REGISTER_LOOPER = _IO('c', 11),
+       /*
+        * No parameters.
+        * Register a spawned looper thread with the device.
+        */
+
+       BC_ENTER_LOOPER = _IO('c', 12),
+       BC_EXIT_LOOPER = _IO('c', 13),
+       /*
+        * No parameters.
+        * These two commands are sent as an application-level thread
+        * enters and exits the binder loop, respectively.  They are
+        * used so the binder can have an accurate count of the number
+        * of looping threads it has available.
+        */
+
+       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
+       /*
+        * int: handle
+        * void *: cookie
+        */
+
+       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
+       /*
+        * int: handle
+        * void *: cookie
+        */
+
+       BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
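
A hedged userspace sketch of the bring-up sequence implied by these definitions: open the driver, verify BINDER_VERSION against the protocol constant, and mmap the read-only region through which the driver delivers transaction buffers. The mapping size is an arbitrary illustration, not a value mandated by this header.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "binder.h"

#define EXAMPLE_VM_SIZE (1024 * 1024)   /* illustrative mapping size */

int binder_open_checked(void)
{
        struct binder_version vers;
        void *map;
        int fd = open("/dev/binder", O_RDWR);

        if (fd < 0)
                return -1;

        if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
            vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
                close(fd);
                return -1;      /* kernel/userspace protocol mismatch */
        }

        map = mmap(NULL, EXAMPLE_VM_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
        if (map == MAP_FAILED) {
                close(fd);
                return -1;
        }
        return fd;      /* ready for BINDER_WRITE_READ */
}
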
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644 (file)
index 0000000..f09e7c1
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:       memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
+ *                              carveout heap, allocations are physically
+ *                              contiguous
+ * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
+ * @ION_NUM_HEAPS:              helper for iterating over heaps, a bit mask
+ *                              is used to identify the heaps, so only 32
+ *                              total heap types are supported
+ */
+enum ion_heap_type {
+       ION_HEAP_TYPE_SYSTEM,
+       ION_HEAP_TYPE_SYSTEM_CONTIG,
+       ION_HEAP_TYPE_CARVEOUT,
+       ION_HEAP_TYPE_CHUNK,
+       ION_HEAP_TYPE_DMA,
+       ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+                                are at the end of this enum */
+       ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK           (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK    (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK         (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS               sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1              /* mappings of this buffer should be
+                                          cached, ion will do cache
+                                          maintenance when the buffer is
+                                          mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2   /* mappings of this buffer will be
+                                          created at mmap time, if this is set
+                                          caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:               size of the allocation
+ * @align:             required alignment of the allocation
+ * @heap_id_mask:      mask of heap ids to allocate from
+ * @flags:             flags passed to heap
+ * @handle:            pointer that will be populated with a cookie to use to 
+ *                     refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+       size_t len;
+       size_t align;
+       unsigned int heap_id_mask;
+       unsigned int flags;
+       ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:    a handle
+ * @fd:                a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+       ion_user_handle_t handle;
+       int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:    a handle
+ */
+struct ion_handle_data {
+       ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:       the custom ioctl function to call
+ * @arg:       additional data to pass to the custom ioctl, typically a user
+ *             pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+       unsigned int cmd;
+       unsigned long arg;
+};
+
+#define ION_IOC_MAGIC          'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC          _IOWR(ION_IOC_MAGIC, 0, \
+                                     struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE           _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP            _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE          _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * filed set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT         _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, use it after touching a cached buffer from the cpu;
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC           _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM         _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
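
A hedged userspace sketch of the DOC flow above: allocate from the system heap, turn the opaque handle into a shareable fd, and mmap it. The heap mask and flags are illustrative choices, and the mask assumes heap ids were registered to match the heap types.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"

void *ion_alloc_mapped(size_t len, int *out_fd)
{
        struct ion_allocation_data alloc = {
                .len = len,
                .align = 0,
                .heap_id_mask = ION_HEAP_SYSTEM_MASK,
                .flags = ION_FLAG_CACHED,
        };
        struct ion_fd_data share;
        struct ion_handle_data to_free;
        void *addr = MAP_FAILED;
        int fd = open("/dev/ion", O_RDWR);

        if (fd < 0)
                return NULL;
        if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
                goto out;

        share.handle = alloc.handle;
        if (ioctl(fd, ION_IOC_SHARE, &share) == 0) {
                addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                            share.fd, 0);
                *out_fd = share.fd;     /* pass to another process if desired */
        }

        to_free.handle = alloc.handle;  /* dma-buf fd keeps the memory alive */
        ioctl(fd, ION_IOC_FREE, &to_free);
out:
        close(fd);
        return addr == MAP_FAILED ? NULL : addr;
}
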
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644 (file)
index 0000000..ffef06f
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read handle
+ * @ptr:       a pointer to an area at least as large as size
+ * @offset:    offset into the ion buffer to start reading
+ * @size:      size to read or write
+ * @write:     1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+       __u64 ptr;
+       __u64 offset;
+       __u64 size;
+       int write;
+       int __padding;
+};
+
+#define ION_IOC_MAGIC          'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma-buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+                       _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping.  Can be
+ * used by unit tests to emulate a DMA engine as close as possible.  Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+                       _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping.  Can be
+ * used by unit tests to test heap map_kernel functions.  Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+                       _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644 (file)
index 0000000..9b5d486
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+       __u32   value;
+       char    name[32];
+       __s32   fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC      'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+               struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
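
A hedged userspace sketch of this interface: each open of the sw_sync device acts as a timeline; create a fence at a future count, then increment the timeline to signal it. The /dev/sw_sync node name is an assumption about the target's dev setup.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sw_sync.h"

int sw_sync_example(void)
{
        struct sw_sync_create_fence_data data;
        __u32 inc = 1;
        int timeline = open("/dev/sw_sync", O_RDWR);

        if (timeline < 0)
                return -1;

        memset(&data, 0, sizeof(data));
        data.value = 1;         /* fence signals once the timeline reaches 1 */
        strncpy(data.name, "example_fence", sizeof(data.name) - 1);
        if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0) {
                close(timeline);
                return -1;
        }

        ioctl(timeline, SW_SYNC_IOC_INC, &inc); /* advance; the fence fires */
        close(data.fence);      /* new fence fd was returned in data.fence */
        close(timeline);
        return 0;
}
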
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644 (file)
index 0000000..57fdaad
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:       file descriptor of second fence
+ * @name:      name of new fence
+ * @fence:     returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+       __s32   fd2; /* fd of second fence */
+       char    name[32]; /* name of new fence */
+       __s32   fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:               length of sync_pt_info including any driver_data
+ * @obj_name:          name of parent sync_timeline
+ * @driver_name:       name of driver implementing the parent
+ * @status:            status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:      timestamp of status change in nanoseconds
+ * @driver_data:       any driver dependent data
+ */
+struct sync_pt_info {
+       __u32   len;
+       char    obj_name[32];
+       char    driver_name[32];
+       __s32   status;
+       __u64   timestamp_ns;
+
+       __u8    driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:       ioctl caller writes the size of the buffer it is passing in.
+ *             ioctl returns length of sync_fence_data returned to userspace
+ *             including pt_info.
+ * @name:      name of fence
+ * @status:    status of fence. 1: signaled 0:active <0:error
+ * @pt_info:   a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+       __u32   len;
+       char    name[32];
+       __s32   status;
+
+       __u8    pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC         '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds.  Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT          _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO    _IOWR(SYNC_IOC_MAGIC, 2,\
+       struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
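
A hedged userspace sketch of the two most common calls defined above, operating on fence fds such as those produced by the sw_sync interface:

#include <string.h>
#include <sys/ioctl.h>
#include "sync.h"

/* Block up to timeout_ms (forever if negative) until the fence signals. */
int sync_wait(int fence_fd, int timeout_ms)
{
        __s32 to = timeout_ms;

        return ioctl(fence_fd, SYNC_IOC_WAIT, &to);
}

/* Merge two fences; returns the new fence fd, or -1 on error. */
int sync_merge(const char *name, int fd1, int fd2)
{
        struct sync_merge_data data;

        memset(&data, 0, sizeof(data));
        data.fd2 = fd2;
        strncpy(data.name, name, sizeof(data.name) - 1);
        if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
                return -1;
        return data.fence;
}
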
diff --git a/drivers/switch/Kconfig b/drivers/switch/Kconfig
new file mode 100644 (file)
index 0000000..19404b6
--- /dev/null
@@ -0,0 +1,15 @@
+menuconfig SWITCH
+       tristate "Switch class support"
+       help
+         Say Y here to enable switch class support. This allows
+         monitoring switches by userspace via sysfs and uevent.
+
+if SWITCH
+
+config SWITCH_GPIO
+       tristate "GPIO Swith support"
+       depends on GPIOLIB
+       help
+         Say Y here to enable GPIO based switch support.
+
+endif # SWITCH
diff --git a/drivers/switch/Makefile b/drivers/switch/Makefile
new file mode 100644 (file)
index 0000000..f7606ed
--- /dev/null
@@ -0,0 +1,4 @@
+# Switch Class Driver
+obj-$(CONFIG_SWITCH)           += switch_class.o
+obj-$(CONFIG_SWITCH_GPIO)      += switch_gpio.o
+
diff --git a/drivers/switch/switch_class.c b/drivers/switch/switch_class.c
new file mode 100644 (file)
index 0000000..e373b62
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ *  drivers/switch/switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/switch.h>
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct switch_dev *sdev = (struct switch_dev *)
+               dev_get_drvdata(dev);
+
+       if (sdev->print_state) {
+               int ret = sdev->print_state(sdev, buf);
+               if (ret >= 0)
+                       return ret;
+       }
+       return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct switch_dev *sdev = (struct switch_dev *)
+               dev_get_drvdata(dev);
+
+       if (sdev->print_name) {
+               int ret = sdev->print_name(sdev, buf);
+               if (ret >= 0)
+                       return ret;
+       }
+       return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+       char name_buf[120];
+       char state_buf[120];
+       char *prop_buf;
+       char *envp[3];
+       int env_offset = 0;
+       int length;
+
+       if (sdev->state != state) {
+               sdev->state = state;
+
+               prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+               if (prop_buf) {
+                       length = name_show(sdev->dev, NULL, prop_buf);
+                       if (length > 0) {
+                               if (prop_buf[length - 1] == '\n')
+                                       prop_buf[length - 1] = 0;
+                               snprintf(name_buf, sizeof(name_buf),
+                                       "SWITCH_NAME=%s", prop_buf);
+                               envp[env_offset++] = name_buf;
+                       }
+                       length = state_show(sdev->dev, NULL, prop_buf);
+                       if (length > 0) {
+                               if (prop_buf[length - 1] == '\n')
+                                       prop_buf[length - 1] = 0;
+                               snprintf(state_buf, sizeof(state_buf),
+                                       "SWITCH_STATE=%s", prop_buf);
+                               envp[env_offset++] = state_buf;
+                       }
+                       envp[env_offset] = NULL;
+                       kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+                       free_page((unsigned long)prop_buf);
+               } else {
+                       printk(KERN_ERR "out of memory in switch_set_state\n");
+                       kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+       if (!switch_class) {
+               switch_class = class_create(THIS_MODULE, "switch");
+               if (IS_ERR(switch_class))
+                       return PTR_ERR(switch_class);
+               atomic_set(&device_count, 0);
+       }
+
+       return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+       int ret;
+
+       if (!switch_class) {
+               ret = create_switch_class();
+               if (ret < 0)
+                       return ret;
+       }
+
+       sdev->index = atomic_inc_return(&device_count);
+       sdev->dev = device_create(switch_class, NULL,
+               MKDEV(0, sdev->index), NULL, sdev->name);
+       if (IS_ERR(sdev->dev))
+               return PTR_ERR(sdev->dev);
+
+       ret = device_create_file(sdev->dev, &dev_attr_state);
+       if (ret < 0)
+               goto err_create_file_1;
+       ret = device_create_file(sdev->dev, &dev_attr_name);
+       if (ret < 0)
+               goto err_create_file_2;
+
+       dev_set_drvdata(sdev->dev, sdev);
+       sdev->state = 0;
+       return 0;
+
+err_create_file_2:
+       device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+       device_destroy(switch_class, MKDEV(0, sdev->index));
+       printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+       device_remove_file(sdev->dev, &dev_attr_name);
+       device_remove_file(sdev->dev, &dev_attr_state);
+       device_destroy(switch_class, MKDEV(0, sdev->index));
+       dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+       return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+       class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/switch/switch_gpio.c b/drivers/switch/switch_gpio.c
new file mode 100644 (file)
index 0000000..621d62d
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ *  drivers/switch/switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/switch.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+
+struct gpio_switch_data {
+       struct switch_dev sdev;
+       unsigned gpio;
+       const char *name_on;
+       const char *name_off;
+       const char *state_on;
+       const char *state_off;
+       int irq;
+       struct work_struct work;
+};
+
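+/*
+ * The hard IRQ handler below only schedules this work item; the GPIO is
+ * sampled and the uevent sent from process context, where
+ * switch_set_state() is free to allocate and sleep.
+ */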
+static void gpio_switch_work(struct work_struct *work)
+{
+       int state;
+       struct gpio_switch_data *data =
+               container_of(work, struct gpio_switch_data, work);
+
+       state = gpio_get_value(data->gpio);
+       switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+       struct gpio_switch_data *switch_data =
+           (struct gpio_switch_data *)dev_id;
+
+       schedule_work(&switch_data->work);
+       return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+       struct gpio_switch_data *switch_data =
+               container_of(sdev, struct gpio_switch_data, sdev);
+       const char *state;
+       if (switch_get_state(sdev))
+               state = switch_data->state_on;
+       else
+               state = switch_data->state_off;
+
+       if (state)
+               return sprintf(buf, "%s\n", state);
+       return -EINVAL;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+       struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+       struct gpio_switch_data *switch_data;
+       int ret = 0;
+
+       if (!pdata)
+               return -EBUSY;
+
+       switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+       if (!switch_data)
+               return -ENOMEM;
+
+       switch_data->sdev.name = pdata->name;
+       switch_data->gpio = pdata->gpio;
+       switch_data->name_on = pdata->name_on;
+       switch_data->name_off = pdata->name_off;
+       switch_data->state_on = pdata->state_on;
+       switch_data->state_off = pdata->state_off;
+       switch_data->sdev.print_state = switch_gpio_print_state;
+
+       ret = switch_dev_register(&switch_data->sdev);
+       if (ret < 0)
+               goto err_switch_dev_register;
+
+       ret = gpio_request(switch_data->gpio, pdev->name);
+       if (ret < 0)
+               goto err_request_gpio;
+
+       ret = gpio_direction_input(switch_data->gpio);
+       if (ret < 0)
+               goto err_set_gpio_input;
+
+       INIT_WORK(&switch_data->work, gpio_switch_work);
+
+       switch_data->irq = gpio_to_irq(switch_data->gpio);
+       if (switch_data->irq < 0) {
+               ret = switch_data->irq;
+               goto err_detect_irq_num_failed;
+       }
+
+       ret = request_irq(switch_data->irq, gpio_irq_handler,
+                         IRQF_TRIGGER_LOW, pdev->name, switch_data);
+       if (ret < 0)
+               goto err_request_irq;
+
+       platform_set_drvdata(pdev, switch_data);
+
+       /* Perform initial detection */
+       gpio_switch_work(&switch_data->work);
+
+       return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+       gpio_free(switch_data->gpio);
+err_request_gpio:
+       switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+       kfree(switch_data);
+
+       return ret;
+}
+
+static int gpio_switch_remove(struct platform_device *pdev)
+{
+       struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+       /* Free the IRQ first so no new work can be queued behind us */
+       free_irq(switch_data->irq, switch_data);
+       cancel_work_sync(&switch_data->work);
+       gpio_free(switch_data->gpio);
+       switch_dev_unregister(&switch_data->sdev);
+       kfree(switch_data);
+
+       return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+       .probe          = gpio_switch_probe,
+       .remove         = gpio_switch_remove,
+       .driver         = {
+               .name   = "switch-gpio",
+               .owner  = THIS_MODULE,
+       },
+};
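+/*
+ * Illustrative hookup (switch name and GPIO number are hypothetical): a
+ * board file binds this driver by registering a "switch-gpio" platform
+ * device whose platform_data supplies the switch name and GPIO, e.g.:
+ *
+ *	static struct gpio_switch_platform_data hs_switch_pdata = {
+ *		.name = "h2w",
+ *		.gpio = 29,
+ *	};
+ *	static struct platform_device hs_switch_device = {
+ *		.name = "switch-gpio",
+ *		.id   = -1,
+ *		.dev  = { .platform_data = &hs_switch_pdata },
+ *	};
+ *	platform_device_register(&hs_switch_device);
+ */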
+
+static int __init gpio_switch_init(void)
+{
+       return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+       platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
index f87dbfd3277047a9c9d6d2ef6fdd7c3487a32f93..7855f3a4ad05ceeda9f96068b45b37e632dda7d1 100644 (file)
@@ -95,6 +95,9 @@ static void __uart_start(struct tty_struct *tty)
        struct uart_state *state = tty->driver_data;
        struct uart_port *port = state->uart_port;
 
+       if (port->ops->wake_peer)
+               port->ops->wake_peer(port);
+
        if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
            !tty->stopped && !tty->hw_stopped)
                port->ops->start_tx(port);
index f41aa0d0c414312163bf2076935fb96adccd7036..67409fda70db2d2d2c2e17ce1e898f4b6e630467 100644 (file)
@@ -818,6 +818,24 @@ config USB_G_PRINTER
          For more information, see Documentation/usb/gadget_printer.txt
          which includes sample code for accessing the device file.
 
+config USB_G_ANDROID
+       boolean "Android Composite Gadget"
+       select USB_F_ACM
+       select USB_LIBCOMPOSITE
+       select USB_U_SERIAL
+       help
+         The Android Composite Gadget supports multiple USB
+         functions: adb, acm, mass storage, mtp, accessory
+         and rndis.
+         Each function can be configured and enabled/disabled
+         dynamically from userspace through a sysfs interface.
+
+config USB_ANDROID_RNDIS_DWORD_ALIGNED
+       boolean "Use double-word-aligned RNDIS buffers"
+       depends on USB_G_ANDROID
+       help
+         Align RNDIS buffers on double-word boundaries for DMA
+         controllers that require it.
+
 if TTY
 
 config USB_CDC_COMPOSITE
index 6afd16659e78d719c8fe7a5bf3702fa40329eba4..0ec50ae5b6390ab7c8a9277ebccaa88a63efaca0 100644 (file)
@@ -65,6 +65,7 @@ g_nokia-y                     := nokia.o
 g_webcam-y                     := webcam.o
 g_ncm-y                                := ncm.o
 g_acm_ms-y                     := acm_ms.o
+g_android-y                    := android.o
 g_tcm_usb_gadget-y             := tcm_usb_gadget.o
 
 obj-$(CONFIG_USB_ZERO)         += g_zero.o
@@ -84,4 +85,5 @@ obj-$(CONFIG_USB_G_NOKIA)     += g_nokia.o
 obj-$(CONFIG_USB_G_WEBCAM)     += g_webcam.o
 obj-$(CONFIG_USB_G_NCM)                += g_ncm.o
 obj-$(CONFIG_USB_G_ACM_MS)     += g_acm_ms.o
+obj-$(CONFIG_USB_G_ANDROID)    += g_android.o
 obj-$(CONFIG_USB_GADGET_TARGET)        += tcm_usb_gadget.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
new file mode 100644 (file)
index 0000000..04cbeb1
--- /dev/null
@@ -0,0 +1,1519 @@
+/*
+ * Gadget Driver for Android
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *         Benoit Goby <benoit@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+#include "f_fs.c"
+#include "f_audio_source.c"
+#include "f_mass_storage.c"
+#include "f_mtp.c"
+#include "f_accessory.c"
+#define USB_ETH_RNDIS y
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
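+/*
+ * The function drivers above are pulled in by textual inclusion rather
+ * than linked as separate objects: this legacy composite driver builds
+ * every supported function into the single g_android module and selects
+ * among them at runtime via sysfs.
+ */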
+
+MODULE_AUTHOR("Mike Lockwood");
+MODULE_DESCRIPTION("Android Composite USB Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static const char longname[] = "Gadget Android";
+
+/* Default vendor and product IDs, overridden by userspace */
+#define VENDOR_ID              0x18D1
+#define PRODUCT_ID             0x0001
+
+struct android_usb_function {
+       char *name;
+       void *config;
+
+       struct device *dev;
+       char *dev_name;
+       struct device_attribute **attributes;
+
+       /* for android_dev.enabled_functions */
+       struct list_head enabled_list;
+
+       /* Optional: initialization during gadget bind */
+       int (*init)(struct android_usb_function *, struct usb_composite_dev *);
+       /* Optional: cleanup during gadget unbind */
+       void (*cleanup)(struct android_usb_function *);
+       /* Optional: called when the function is added to the list
+        *              of enabled functions */
+       void (*enable)(struct android_usb_function *);
+       /* Optional: called when it is removed */
+       void (*disable)(struct android_usb_function *);
+
+       int (*bind_config)(struct android_usb_function *,
+                          struct usb_configuration *);
+
+       /* Optional: called when the configuration is removed */
+       void (*unbind_config)(struct android_usb_function *,
+                             struct usb_configuration *);
+       /* Optional: handle ctrl requests before the device is configured */
+       int (*ctrlrequest)(struct android_usb_function *,
+                                       struct usb_composite_dev *,
+                                       const struct usb_ctrlrequest *);
+};
+
+struct android_dev {
+       struct android_usb_function **functions;
+       struct list_head enabled_functions;
+       struct usb_composite_dev *cdev;
+       struct device *dev;
+
+       bool enabled;
+       int disable_depth;
+       struct mutex mutex;
+       bool connected;
+       bool sw_connected;
+       struct work_struct work;
+       char ffs_aliases[256];
+};
+
+static struct class *android_class;
+static struct android_dev *_android_dev;
+static int android_bind_config(struct usb_configuration *c);
+static void android_unbind_config(struct usb_configuration *c);
+
+/* string IDs are assigned dynamically */
+#define STRING_MANUFACTURER_IDX                0
+#define STRING_PRODUCT_IDX             1
+#define STRING_SERIAL_IDX              2
+
+static char manufacturer_string[256];
+static char product_string[256];
+static char serial_string[256];
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+       [STRING_MANUFACTURER_IDX].s = manufacturer_string,
+       [STRING_PRODUCT_IDX].s = product_string,
+       [STRING_SERIAL_IDX].s = serial_string,
+       {  }                    /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+       .language       = 0x0409,       /* en-us */
+       .strings        = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+       &stringtab_dev,
+       NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+       .bLength              = sizeof(device_desc),
+       .bDescriptorType      = USB_DT_DEVICE,
+       .bcdUSB               = __constant_cpu_to_le16(0x0200),
+       .bDeviceClass         = USB_CLASS_PER_INTERFACE,
+       .idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+       .idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+       .bcdDevice            = __constant_cpu_to_le16(0xffff),
+       .bNumConfigurations   = 1,
+};
+
+static struct usb_configuration android_config_driver = {
+       .label          = "android",
+       .unbind         = android_unbind_config,
+       .bConfigurationValue = 1,
+       .bmAttributes   = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+       .MaxPower       = 500, /* 500 mA */
+};
+
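+/*
+ * Deferred uevent sender. sw_connected records the last state reported
+ * to userspace, so a connect/disconnect bounce that settles before this
+ * work runs does not generate a spurious uevent.
+ */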
+static void android_work(struct work_struct *data)
+{
+       struct android_dev *dev = container_of(data, struct android_dev, work);
+       struct usb_composite_dev *cdev = dev->cdev;
+       char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+       char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+       char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+       char **uevent_envp = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (cdev->config)
+               uevent_envp = configured;
+       else if (dev->connected != dev->sw_connected)
+               uevent_envp = dev->connected ? connected : disconnected;
+       dev->sw_connected = dev->connected;
+       spin_unlock_irqrestore(&cdev->lock, flags);
+
+       if (uevent_envp) {
+               kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp);
+               pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]);
+       } else {
+               pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+                        dev->connected, dev->sw_connected, cdev->config);
+       }
+}
+
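+/*
+ * disable_depth is a disable count: the configuration is added and the
+ * gadget pulled up only when it drops back to zero, so administrative
+ * disable (the "enable" attribute) and transient disables (an ffs daemon
+ * that has not opened its endpoints yet) nest correctly.
+ */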
+static void android_enable(struct android_dev *dev)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+
+       if (WARN_ON(!dev->disable_depth))
+               return;
+
+       if (--dev->disable_depth == 0) {
+               usb_add_config(cdev, &android_config_driver,
+                                       android_bind_config);
+               usb_gadget_connect(cdev->gadget);
+       }
+}
+
+static void android_disable(struct android_dev *dev)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+
+       if (dev->disable_depth++ == 0) {
+               usb_gadget_disconnect(cdev->gadget);
+               /* Cancel pending control requests */
+               usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
+               usb_remove_config(cdev, &android_config_driver);
+       }
+}
+
+/*-------------------------------------------------------------------------*/
+/* Supported functions initialization */
+
+struct functionfs_config {
+       bool opened;
+       bool enabled;
+       struct ffs_data *data;
+};
+
+static int ffs_function_init(struct android_usb_function *f,
+                            struct usb_composite_dev *cdev)
+{
+       f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
+       if (!f->config)
+               return -ENOMEM;
+
+       return functionfs_init();
+}
+
+static void ffs_function_cleanup(struct android_usb_function *f)
+{
+       functionfs_cleanup();
+       kfree(f->config);
+}
+
+static void ffs_function_enable(struct android_usb_function *f)
+{
+       struct android_dev *dev = _android_dev;
+       struct functionfs_config *config = f->config;
+
+       config->enabled = true;
+
+       /* Disable the gadget until the function is ready */
+       if (!config->opened)
+               android_disable(dev);
+}
+
+static void ffs_function_disable(struct android_usb_function *f)
+{
+       struct android_dev *dev = _android_dev;
+       struct functionfs_config *config = f->config;
+
+       config->enabled = false;
+
+       /* Balance the disable that was called in closed_callback */
+       if (!config->opened)
+               android_enable(dev);
+}
+
+static int ffs_function_bind_config(struct android_usb_function *f,
+                                   struct usb_configuration *c)
+{
+       struct functionfs_config *config = f->config;
+       return functionfs_bind_config(c->cdev, c, config->data);
+}
+
+static ssize_t
+ffs_aliases_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+       struct android_dev *dev = _android_dev;
+       int ret;
+
+       mutex_lock(&dev->mutex);
+       ret = sprintf(buf, "%s\n", dev->ffs_aliases);
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+
+static ssize_t
+ffs_aliases_store(struct device *pdev, struct device_attribute *attr,
+                                       const char *buf, size_t size)
+{
+       struct android_dev *dev = _android_dev;
+       char buff[256];
+
+       mutex_lock(&dev->mutex);
+
+       if (dev->enabled) {
+               mutex_unlock(&dev->mutex);
+               return -EBUSY;
+       }
+
+       strlcpy(buff, buf, sizeof(buff));
+       strlcpy(dev->ffs_aliases, strim(buff), sizeof(dev->ffs_aliases));
+
+       mutex_unlock(&dev->mutex);
+
+       return size;
+}
+
+static DEVICE_ATTR(aliases, S_IRUGO | S_IWUSR, ffs_aliases_show,
+                                              ffs_aliases_store);
+static struct device_attribute *ffs_function_attributes[] = {
+       &dev_attr_aliases,
+       NULL
+};
+
+static struct android_usb_function ffs_function = {
+       .name           = "ffs",
+       .init           = ffs_function_init,
+       .enable         = ffs_function_enable,
+       .disable        = ffs_function_disable,
+       .cleanup        = ffs_function_cleanup,
+       .bind_config    = ffs_function_bind_config,
+       .attributes     = ffs_function_attributes,
+};
+
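+/*
+ * The ffs function is live only when both flags in functionfs_config are
+ * set: "enabled" tracks userspace intent (the functions list), "opened"
+ * tracks whether the FunctionFS daemon has supplied descriptors. The
+ * ready/closed callbacks below and the enable/disable hooks above each
+ * toggle the gadget only when the other half is already in place.
+ */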
+static int functionfs_ready_callback(struct ffs_data *ffs)
+{
+       struct android_dev *dev = _android_dev;
+       struct functionfs_config *config = ffs_function.config;
+       int ret = 0;
+
+       mutex_lock(&dev->mutex);
+
+       ret = functionfs_bind(ffs, dev->cdev);
+       if (ret)
+               goto err;
+
+       config->data = ffs;
+       config->opened = true;
+
+       if (config->enabled)
+               android_enable(dev);
+
+err:
+       mutex_unlock(&dev->mutex);
+       return ret;
+}
+
+static void functionfs_closed_callback(struct ffs_data *ffs)
+{
+       struct android_dev *dev = _android_dev;
+       struct functionfs_config *config = ffs_function.config;
+
+       mutex_lock(&dev->mutex);
+
+       if (config->enabled)
+               android_disable(dev);
+
+       config->opened = false;
+       config->data = NULL;
+
+       functionfs_unbind(ffs);
+
+       mutex_unlock(&dev->mutex);
+}
+
+static void *functionfs_acquire_dev_callback(const char *dev_name)
+{
+       return NULL;
+}
+
+static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
+{
+}
+
+#define MAX_ACM_INSTANCES 4
+struct acm_function_config {
+       int instances;
+       int instances_on;
+       struct usb_function *f_acm[MAX_ACM_INSTANCES];
+       struct usb_function_instance *f_acm_inst[MAX_ACM_INSTANCES];
+};
+
+static int
+acm_function_init(struct android_usb_function *f,
+               struct usb_composite_dev *cdev)
+{
+       int i;
+       int ret;
+       struct acm_function_config *config;
+
+       config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+       f->config = config;
+
+       for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+               config->f_acm_inst[i] = usb_get_function_instance("acm");
+               if (IS_ERR(config->f_acm_inst[i])) {
+                       ret = PTR_ERR(config->f_acm_inst[i]);
+                       goto err_usb_get_function_instance;
+               }
+               config->f_acm[i] = usb_get_function(config->f_acm_inst[i]);
+               if (IS_ERR(config->f_acm[i])) {
+                       ret = PTR_ERR(config->f_acm[i]);
+                       goto err_usb_get_function;
+               }
+       }
+       return 0;
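+       /*
+        * Unwind in reverse. Earlier slots hold both a function and an
+        * instance; the jump to the inner label skips usb_put_function()
+        * for the slot whose usb_get_function() never succeeded.
+        */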
+err_usb_get_function_instance:
+       while (i-- > 0) {
+               usb_put_function(config->f_acm[i]);
+err_usb_get_function:
+               usb_put_function_instance(config->f_acm_inst[i]);
+       }
+       return ret;
+}
+
+static void acm_function_cleanup(struct android_usb_function *f)
+{
+       int i;
+       struct acm_function_config *config = f->config;
+
+       for (i = 0; i < MAX_ACM_INSTANCES; i++) {
+               usb_put_function(config->f_acm[i]);
+               usb_put_function_instance(config->f_acm_inst[i]);
+       }
+       kfree(f->config);
+       f->config = NULL;
+}
+
+static int
+acm_function_bind_config(struct android_usb_function *f,
+               struct usb_configuration *c)
+{
+       int i;
+       int ret = 0;
+       struct acm_function_config *config = f->config;
+
+       config->instances_on = config->instances;
+       for (i = 0; i < config->instances_on; i++) {
+               ret = usb_add_function(c, config->f_acm[i]);
+               if (ret) {
+                       pr_err("Could not bind acm%u config\n", i);
+                       goto err_usb_add_function;
+               }
+       }
+
+       return 0;
+
+err_usb_add_function:
+       while (i-- > 0)
+               usb_remove_function(c, config->f_acm[i]);
+       return ret;
+}
+
+static void acm_function_unbind_config(struct android_usb_function *f,
+                                      struct usb_configuration *c)
+{
+       int i;
+       struct acm_function_config *config = f->config;
+
+       for (i = 0; i < config->instances_on; i++)
+               usb_remove_function(c, config->f_acm[i]);
+}
+
+static ssize_t acm_instances_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct acm_function_config *config = f->config;
+       return sprintf(buf, "%d\n", config->instances);
+}
+
+static ssize_t acm_instances_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct acm_function_config *config = f->config;
+       int value;
+
+       /* Reject unparseable or negative counts; value would otherwise
+        * be used uninitialized. */
+       if (sscanf(buf, "%d", &value) != 1 || value < 0)
+               return -EINVAL;
+       if (value > MAX_ACM_INSTANCES)
+               value = MAX_ACM_INSTANCES;
+       config->instances = value;
+       return size;
+}
+
+static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show,
+                                                acm_instances_store);
+static struct device_attribute *acm_function_attributes[] = {
+       &dev_attr_instances,
+       NULL
+};
+
+static struct android_usb_function acm_function = {
+       .name           = "acm",
+       .init           = acm_function_init,
+       .cleanup        = acm_function_cleanup,
+       .bind_config    = acm_function_bind_config,
+       .unbind_config  = acm_function_unbind_config,
+       .attributes     = acm_function_attributes,
+};
+
+
+static int
+mtp_function_init(struct android_usb_function *f,
+               struct usb_composite_dev *cdev)
+{
+       return mtp_setup();
+}
+
+static void mtp_function_cleanup(struct android_usb_function *f)
+{
+       mtp_cleanup();
+}
+
+static int
+mtp_function_bind_config(struct android_usb_function *f,
+               struct usb_configuration *c)
+{
+       return mtp_bind_config(c, false);
+}
+
+static int
+ptp_function_init(struct android_usb_function *f,
+               struct usb_composite_dev *cdev)
+{
+       /* nothing to do - initialization is handled by mtp_function_init */
+       return 0;
+}
+
+static void ptp_function_cleanup(struct android_usb_function *f)
+{
+       /* nothing to do - cleanup is handled by mtp_function_cleanup */
+}
+
+static int
+ptp_function_bind_config(struct android_usb_function *f,
+               struct usb_configuration *c)
+{
+       return mtp_bind_config(c, true);
+}
+
+static int mtp_function_ctrlrequest(struct android_usb_function *f,
+                                       struct usb_composite_dev *cdev,
+                                       const struct usb_ctrlrequest *c)
+{
+       return mtp_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function mtp_function = {
+       .name           = "mtp",
+       .init           = mtp_function_init,
+       .cleanup        = mtp_function_cleanup,
+       .bind_config    = mtp_function_bind_config,
+       .ctrlrequest    = mtp_function_ctrlrequest,
+};
+
+/* PTP function is same as MTP with slightly different interface descriptor */
+static struct android_usb_function ptp_function = {
+       .name           = "ptp",
+       .init           = ptp_function_init,
+       .cleanup        = ptp_function_cleanup,
+       .bind_config    = ptp_function_bind_config,
+};
+
+
+struct rndis_function_config {
+       u8      ethaddr[ETH_ALEN];
+       u32     vendorID;
+       char    manufacturer[256];
+       /* "Wireless" RNDIS; auto-detected by Windows */
+       bool    wceis;
+       struct eth_dev *dev;
+};
+
+static int
+rndis_function_init(struct android_usb_function *f,
+               struct usb_composite_dev *cdev)
+{
+       f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL);
+       if (!f->config)
+               return -ENOMEM;
+       return 0;
+}
+
+static void rndis_function_cleanup(struct android_usb_function *f)
+{
+       kfree(f->config);
+       f->config = NULL;
+}
+
+static int
+rndis_function_bind_config(struct android_usb_function *f,
+               struct usb_configuration *c)
+{
+       int ret;
+       struct eth_dev *dev;
+       struct rndis_function_config *rndis = f->config;
+
+       if (!rndis) {
+               pr_err("%s: rndis config is null\n", __func__);
+               return -EINVAL;
+       }
+
+       pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+               rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+               rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+
+       dev = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis");
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               pr_err("%s: gether_setup_name failed\n", __func__);
+               return ret;
+       }
+       rndis->dev = dev;
+
+       if (rndis->wceis) {
+               /* "Wireless" RNDIS; auto-detected by Windows */
+               rndis_iad_descriptor.bFunctionClass =
+                                               USB_CLASS_WIRELESS_CONTROLLER;
+               rndis_iad_descriptor.bFunctionSubClass = 0x01;
+               rndis_iad_descriptor.bFunctionProtocol = 0x03;
+               rndis_control_intf.bInterfaceClass =
+                                               USB_CLASS_WIRELESS_CONTROLLER;
+               rndis_control_intf.bInterfaceSubClass = 0x01;
+               rndis_control_intf.bInterfaceProtocol = 0x03;
+       }
+
+       return rndis_bind_config_vendor(c, rndis->ethaddr, rndis->vendorID,
+                                          rndis->manufacturer, rndis->dev);
+}
+
+static void rndis_function_unbind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct rndis_function_config *rndis = f->config;
+       gether_cleanup(rndis->dev);
+}
+
+static ssize_t rndis_manufacturer_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+       return sprintf(buf, "%s\n", config->manufacturer);
+}
+
+static ssize_t rndis_manufacturer_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+
+       if (size >= sizeof(config->manufacturer))
+               return -EINVAL;
+       if (sscanf(buf, "%s", config->manufacturer) == 1)
+               return size;
+       return -EINVAL;
+}
+
+static DEVICE_ATTR(manufacturer, S_IRUGO | S_IWUSR, rndis_manufacturer_show,
+                                                   rndis_manufacturer_store);
+
+static ssize_t rndis_wceis_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+       return sprintf(buf, "%d\n", config->wceis);
+}
+
+static ssize_t rndis_wceis_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+       int value;
+
+       if (sscanf(buf, "%d", &value) == 1) {
+               config->wceis = value;
+               return size;
+       }
+       return -EINVAL;
+}
+
+static DEVICE_ATTR(wceis, S_IRUGO | S_IWUSR, rndis_wceis_show,
+                                            rndis_wceis_store);
+
+static ssize_t rndis_ethaddr_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *rndis = f->config;
+       return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+               rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2],
+               rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]);
+}
+
+static ssize_t rndis_ethaddr_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *rndis = f->config;
+       int addr[ETH_ALEN];
+       int i;
+
+       /* Scan into ints first: pointing %02x at the u8 ethaddr bytes
+        * would store a full int through each pointer and trample the
+        * neighbouring fields. */
+       if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+                  &addr[0], &addr[1], &addr[2],
+                  &addr[3], &addr[4], &addr[5]) == 6) {
+               for (i = 0; i < ETH_ALEN; i++)
+                       rndis->ethaddr[i] = addr[i];
+               return size;
+       }
+       return -EINVAL;
+}
+
+static DEVICE_ATTR(ethaddr, S_IRUGO | S_IWUSR, rndis_ethaddr_show,
+                                              rndis_ethaddr_store);
+
+static ssize_t rndis_vendorID_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+       return sprintf(buf, "%04x\n", config->vendorID);
+}
+
+static ssize_t rndis_vendorID_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct rndis_function_config *config = f->config;
+       int value;
+
+       if (sscanf(buf, "%04x", &value) == 1) {
+               config->vendorID = value;
+               return size;
+       }
+       return -EINVAL;
+}
+
+static DEVICE_ATTR(vendorID, S_IRUGO | S_IWUSR, rndis_vendorID_show,
+                                               rndis_vendorID_store);
+
+static struct device_attribute *rndis_function_attributes[] = {
+       &dev_attr_manufacturer,
+       &dev_attr_wceis,
+       &dev_attr_ethaddr,
+       &dev_attr_vendorID,
+       NULL
+};
+
+static struct android_usb_function rndis_function = {
+       .name           = "rndis",
+       .init           = rndis_function_init,
+       .cleanup        = rndis_function_cleanup,
+       .bind_config    = rndis_function_bind_config,
+       .unbind_config  = rndis_function_unbind_config,
+       .attributes     = rndis_function_attributes,
+};
+
+
+struct mass_storage_function_config {
+       struct fsg_config fsg;
+       struct fsg_common *common;
+};
+
+static int mass_storage_function_init(struct android_usb_function *f,
+                                       struct usb_composite_dev *cdev)
+{
+       struct mass_storage_function_config *config;
+       struct fsg_common *common;
+       int err;
+
+       config = kzalloc(sizeof(struct mass_storage_function_config),
+                                                               GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+
+       config->fsg.nluns = 1;
+       config->fsg.luns[0].removable = 1;
+
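+       /*
+        * One removable LUN by default. The "lun" symlink created below
+        * exposes its attributes (notably the backing file) so userspace
+        * (typically Android's vold) can attach the image or partition to
+        * export over USB.
+        */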
+       common = fsg_common_init(NULL, cdev, &config->fsg);
+       if (IS_ERR(common)) {
+               kfree(config);
+               return PTR_ERR(common);
+       }
+
+       err = sysfs_create_link(&f->dev->kobj,
+                               &common->luns[0].dev.kobj,
+                               "lun");
+       if (err) {
+               kfree(config);
+               return err;
+       }
+
+       config->common = common;
+       f->config = config;
+       return 0;
+}
+
+static void mass_storage_function_cleanup(struct android_usb_function *f)
+{
+       kfree(f->config);
+       f->config = NULL;
+}
+
+static int mass_storage_function_bind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct mass_storage_function_config *config = f->config;
+       return fsg_bind_config(c->cdev, c, config->common);
+}
+
+static ssize_t mass_storage_inquiry_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct mass_storage_function_config *config = f->config;
+       return sprintf(buf, "%s\n", config->common->inquiry_string);
+}
+
+static ssize_t mass_storage_inquiry_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t size)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct mass_storage_function_config *config = f->config;
+       if (size >= sizeof(config->common->inquiry_string))
+               return -EINVAL;
+       if (sscanf(buf, "%s", config->common->inquiry_string) != 1)
+               return -EINVAL;
+       return size;
+}
+
+static DEVICE_ATTR(inquiry_string, S_IRUGO | S_IWUSR,
+                                       mass_storage_inquiry_show,
+                                       mass_storage_inquiry_store);
+
+static struct device_attribute *mass_storage_function_attributes[] = {
+       &dev_attr_inquiry_string,
+       NULL
+};
+
+static struct android_usb_function mass_storage_function = {
+       .name           = "mass_storage",
+       .init           = mass_storage_function_init,
+       .cleanup        = mass_storage_function_cleanup,
+       .bind_config    = mass_storage_function_bind_config,
+       .attributes     = mass_storage_function_attributes,
+};
+
+
+static int accessory_function_init(struct android_usb_function *f,
+                                       struct usb_composite_dev *cdev)
+{
+       return acc_setup();
+}
+
+static void accessory_function_cleanup(struct android_usb_function *f)
+{
+       acc_cleanup();
+}
+
+static int accessory_function_bind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       return acc_bind_config(c);
+}
+
+static int accessory_function_ctrlrequest(struct android_usb_function *f,
+                                               struct usb_composite_dev *cdev,
+                                               const struct usb_ctrlrequest *c)
+{
+       return acc_ctrlrequest(cdev, c);
+}
+
+static struct android_usb_function accessory_function = {
+       .name           = "accessory",
+       .init           = accessory_function_init,
+       .cleanup        = accessory_function_cleanup,
+       .bind_config    = accessory_function_bind_config,
+       .ctrlrequest    = accessory_function_ctrlrequest,
+};
+
+static int audio_source_function_init(struct android_usb_function *f,
+                       struct usb_composite_dev *cdev)
+{
+       struct audio_source_config *config;
+
+       config = kzalloc(sizeof(struct audio_source_config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+       config->card = -1;
+       config->device = -1;
+       f->config = config;
+       return 0;
+}
+
+static void audio_source_function_cleanup(struct android_usb_function *f)
+{
+       kfree(f->config);
+}
+
+static int audio_source_function_bind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct audio_source_config *config = f->config;
+
+       return audio_source_bind_config(c, config);
+}
+
+static void audio_source_function_unbind_config(struct android_usb_function *f,
+                                               struct usb_configuration *c)
+{
+       struct audio_source_config *config = f->config;
+
+       config->card = -1;
+       config->device = -1;
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct android_usb_function *f = dev_get_drvdata(dev);
+       struct audio_source_config *config = f->config;
+
+       /* print PCM card and device numbers */
+       return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+       &dev_attr_pcm,
+       NULL
+};
+
+static struct android_usb_function audio_source_function = {
+       .name           = "audio_source",
+       .init           = audio_source_function_init,
+       .cleanup        = audio_source_function_cleanup,
+       .bind_config    = audio_source_function_bind_config,
+       .unbind_config  = audio_source_function_unbind_config,
+       .attributes     = audio_source_function_attributes,
+};
+
+static struct android_usb_function *supported_functions[] = {
+       &ffs_function,
+       &acm_function,
+       &mtp_function,
+       &ptp_function,
+       &rndis_function,
+       &mass_storage_function,
+       &accessory_function,
+       &audio_source_function,
+       NULL
+};
+
+
+static int android_init_functions(struct android_usb_function **functions,
+                                 struct usb_composite_dev *cdev)
+{
+       struct android_dev *dev = _android_dev;
+       struct android_usb_function *f;
+       struct device_attribute **attrs;
+       struct device_attribute *attr;
+       int err = 0;
+       int index = 0;
+
+       for (; (f = *functions++); index++) {
+               f->dev_name = kasprintf(GFP_KERNEL, "f_%s", f->name);
+               f->dev = device_create(android_class, dev->dev,
+                               MKDEV(0, index), f, f->dev_name);
+               if (IS_ERR(f->dev)) {
+                       pr_err("%s: Failed to create dev %s", __func__,
+                                                       f->dev_name);
+                       err = PTR_ERR(f->dev);
+                       goto err_create;
+               }
+
+               if (f->init) {
+                       err = f->init(f, cdev);
+                       if (err) {
+                               pr_err("%s: Failed to init %s", __func__,
+                                                               f->name);
+                               goto err_out;
+                       }
+               }
+
+               attrs = f->attributes;
+               if (attrs) {
+                       while ((attr = *attrs++) && !err)
+                               err = device_create_file(f->dev, attr);
+               }
+               if (err) {
+                       pr_err("%s: Failed to create function %s attributes",
+                                       __func__, f->name);
+                       goto err_out;
+               }
+       }
+       return 0;
+
+err_out:
+       device_destroy(android_class, f->dev->devt);
+err_create:
+       kfree(f->dev_name);
+       return err;
+}
+
+static void android_cleanup_functions(struct android_usb_function **functions)
+{
+       struct android_usb_function *f;
+
+       while (*functions) {
+               f = *functions++;
+
+               if (f->dev) {
+                       device_destroy(android_class, f->dev->devt);
+                       kfree(f->dev_name);
+               }
+
+               if (f->cleanup)
+                       f->cleanup(f);
+       }
+}
+
+static int
+android_bind_enabled_functions(struct android_dev *dev,
+                              struct usb_configuration *c)
+{
+       struct android_usb_function *f;
+       int ret;
+
+       list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+               ret = f->bind_config(f, c);
+               if (ret) {
+                       pr_err("%s: %s failed", __func__, f->name);
+                       return ret;
+               }
+       }
+       return 0;
+}
+
+static void
+android_unbind_enabled_functions(struct android_dev *dev,
+                              struct usb_configuration *c)
+{
+       struct android_usb_function *f;
+
+       list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+               if (f->unbind_config)
+                       f->unbind_config(f, c);
+       }
+}
+
+static int android_enable_function(struct android_dev *dev, char *name)
+{
+       struct android_usb_function **functions = dev->functions;
+       struct android_usb_function *f;
+       while ((f = *functions++)) {
+               if (!strcmp(name, f->name)) {
+                       list_add_tail(&f->enabled_list,
+                                               &dev->enabled_functions);
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
+/*-------------------------------------------------------------------------*/
+/* /sys/class/android_usb/android%d/ interface */
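+
+/*
+ * Typical userspace sequence (illustrative; paths follow the android0
+ * device created below):
+ *
+ *	echo 0       > /sys/class/android_usb/android0/enable
+ *	echo 18d1    > /sys/class/android_usb/android0/idVendor
+ *	echo mtp,acm > /sys/class/android_usb/android0/functions
+ *	echo 1       > /sys/class/android_usb/android0/enable
+ *
+ * The functions list can only be rewritten while the gadget is disabled;
+ * functions_store() returns -EBUSY otherwise.
+ */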
+
+static ssize_t
+functions_show(struct device *pdev, struct device_attribute *attr, char *buf)
+{
+       struct android_dev *dev = dev_get_drvdata(pdev);
+       struct android_usb_function *f;
+       char *buff = buf;
+
+       mutex_lock(&dev->mutex);
+
+       list_for_each_entry(f, &dev->enabled_functions, enabled_list)
+               buff += sprintf(buff, "%s,", f->name);
+
+       mutex_unlock(&dev->mutex);
+
+       if (buff != buf)
+               *(buff-1) = '\n';
+       return buff - buf;
+}
+
+static ssize_t
+functions_store(struct device *pdev, struct device_attribute *attr,
+                              const char *buff, size_t size)
+{
+       struct android_dev *dev = dev_get_drvdata(pdev);
+       char *name;
+       char buf[256], *b;
+       char aliases[256], *a;
+       int err;
+       int is_ffs;
+       int ffs_enabled = 0;
+
+       mutex_lock(&dev->mutex);
+
+       if (dev->enabled) {
+               mutex_unlock(&dev->mutex);
+               return -EBUSY;
+       }
+
+       INIT_LIST_HEAD(&dev->enabled_functions);
+
+       strlcpy(buf, buff, sizeof(buf));
+       b = strim(buf);
+
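+       /*
+        * Walk the comma-separated list. A name matching an entry in
+        * ffs_aliases (Android typically aliases "adb" this way) is served
+        * by FunctionFS, so it enables the single "ffs" function instead;
+        * ffs_enabled ensures that happens at most once.
+        */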
+       while (b) {
+               name = strsep(&b, ",");
+               if (!name)
+                       continue;
+
+               is_ffs = 0;
+               strlcpy(aliases, dev->ffs_aliases, sizeof(aliases));
+               a = aliases;
+
+               while (a) {
+                       char *alias = strsep(&a, ",");
+                       if (alias && !strcmp(name, alias)) {
+                               is_ffs = 1;
+                               break;
+                       }
+               }
+
+               if (is_ffs) {
+                       if (ffs_enabled)
+                               continue;
+                       err = android_enable_function(dev, "ffs");
+                       if (err)
+                               pr_err("android_usb: Cannot enable ffs (%d)",
+                                                                       err);
+                       else
+                               ffs_enabled = 1;
+                       continue;
+               }
+
+               err = android_enable_function(dev, name);
+               if (err)
+                       pr_err("android_usb: Cannot enable '%s' (%d)",
+                                                          name, err);
+       }
+
+       mutex_unlock(&dev->mutex);
+
+       return size;
+}
+
+static ssize_t enable_show(struct device *pdev, struct device_attribute *attr,
+                          char *buf)
+{
+       struct android_dev *dev = dev_get_drvdata(pdev);
+       return sprintf(buf, "%d\n", dev->enabled);
+}
+
+static ssize_t enable_store(struct device *pdev, struct device_attribute *attr,
+                           const char *buff, size_t size)
+{
+       struct android_dev *dev = dev_get_drvdata(pdev);
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct android_usb_function *f;
+       int enabled = 0;
+
+       if (!cdev)
+               return -ENODEV;
+
+       mutex_lock(&dev->mutex);
+
+       sscanf(buff, "%d", &enabled);
+       if (enabled && !dev->enabled) {
+               /*
+                * Update values in composite driver's copy of
+                * device descriptor.
+                */
+               cdev->desc.idVendor = device_desc.idVendor;
+               cdev->desc.idProduct = device_desc.idProduct;
+               cdev->desc.bcdDevice = device_desc.bcdDevice;
+               cdev->desc.bDeviceClass = device_desc.bDeviceClass;
+               cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass;
+               cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol;
+               list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+                       if (f->enable)
+                               f->enable(f);
+               }
+               android_enable(dev);
+               dev->enabled = true;
+       } else if (!enabled && dev->enabled) {
+               android_disable(dev);
+               list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+                       if (f->disable)
+                               f->disable(f);
+               }
+               dev->enabled = false;
+       } else {
+               pr_err("android_usb: already %s\n",
+                               dev->enabled ? "enabled" : "disabled");
+       }
+
+       mutex_unlock(&dev->mutex);
+       return size;
+}
+
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+                          char *buf)
+{
+       struct android_dev *dev = dev_get_drvdata(pdev);
+       struct usb_composite_dev *cdev = dev->cdev;
+       char *state = "DISCONNECTED";
+       unsigned long flags;
+
+       if (!cdev)
+               goto out;
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (cdev->config)
+               state = "CONFIGURED";
+       else if (dev->connected)
+               state = "CONNECTED";
+       spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+       return sprintf(buf, "%s\n", state);
+}
+
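+/*
+ * Attribute generators for device-descriptor fields: the numeric variant
+ * parses with the given format string, the string variant copies into
+ * the static manufacturer/product/serial buffers referenced by the
+ * string table.
+ */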
+#define DESCRIPTOR_ATTR(field, format_string)                          \
+static ssize_t                                                         \
+field ## _show(struct device *dev, struct device_attribute *attr,      \
+               char *buf)                                              \
+{                                                                      \
+       return sprintf(buf, format_string, device_desc.field);          \
+}                                                                      \
+static ssize_t                                                         \
+field ## _store(struct device *dev, struct device_attribute *attr,     \
+               const char *buf, size_t size)                           \
+{                                                                      \
+       int value;                                                      \
+       if (sscanf(buf, format_string, &value) == 1) {                  \
+               device_desc.field = value;                              \
+               return size;                                            \
+       }                                                               \
+       return -EINVAL;                                                 \
+}                                                                      \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+#define DESCRIPTOR_STRING_ATTR(field, buffer)                          \
+static ssize_t                                                         \
+field ## _show(struct device *dev, struct device_attribute *attr,      \
+               char *buf)                                              \
+{                                                                      \
+       return sprintf(buf, "%s", buffer);                              \
+}                                                                      \
+static ssize_t                                                         \
+field ## _store(struct device *dev, struct device_attribute *attr,     \
+               const char *buf, size_t size)                           \
+{                                                                      \
+       if (size >= sizeof(buffer))                                     \
+               return -EINVAL;                                         \
+       return strlcpy(buffer, buf, sizeof(buffer));                    \
+}                                                                      \
+static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, field ## _show, field ## _store);
+
+
+DESCRIPTOR_ATTR(idVendor, "%04x\n")
+DESCRIPTOR_ATTR(idProduct, "%04x\n")
+DESCRIPTOR_ATTR(bcdDevice, "%04x\n")
+DESCRIPTOR_ATTR(bDeviceClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceSubClass, "%d\n")
+DESCRIPTOR_ATTR(bDeviceProtocol, "%d\n")
+DESCRIPTOR_STRING_ATTR(iManufacturer, manufacturer_string)
+DESCRIPTOR_STRING_ATTR(iProduct, product_string)
+DESCRIPTOR_STRING_ATTR(iSerial, serial_string)
+
+static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show,
+                                                functions_store);
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+       &dev_attr_idVendor,
+       &dev_attr_idProduct,
+       &dev_attr_bcdDevice,
+       &dev_attr_bDeviceClass,
+       &dev_attr_bDeviceSubClass,
+       &dev_attr_bDeviceProtocol,
+       &dev_attr_iManufacturer,
+       &dev_attr_iProduct,
+       &dev_attr_iSerial,
+       &dev_attr_functions,
+       &dev_attr_enable,
+       &dev_attr_state,
+       NULL
+};
+
+/*-------------------------------------------------------------------------*/
+/* Composite driver */
+
+static int android_bind_config(struct usb_configuration *c)
+{
+       struct android_dev *dev = _android_dev;
+       int ret = 0;
+
+       ret = android_bind_enabled_functions(dev, c);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void android_unbind_config(struct usb_configuration *c)
+{
+       struct android_dev *dev = _android_dev;
+
+       android_unbind_enabled_functions(dev, c);
+}
+
+static int android_bind(struct usb_composite_dev *cdev)
+{
+       struct android_dev *dev = _android_dev;
+       struct usb_gadget       *gadget = cdev->gadget;
+       int                     id, ret;
+
+       /*
+        * Start disconnected. Userspace will connect the gadget once
+        * it is done configuring the functions.
+        */
+       usb_gadget_disconnect(gadget);
+
+       ret = android_init_functions(dev->functions, cdev);
+       if (ret)
+               return ret;
+
+       /* Allocate string descriptor numbers ... note that string
+        * contents can be overridden by the composite_dev glue.
+        */
+       id = usb_string_id(cdev);
+       if (id < 0)
+               return id;
+       strings_dev[STRING_MANUFACTURER_IDX].id = id;
+       device_desc.iManufacturer = id;
+
+       id = usb_string_id(cdev);
+       if (id < 0)
+               return id;
+       strings_dev[STRING_PRODUCT_IDX].id = id;
+       device_desc.iProduct = id;
+
+       /* Default strings - should be updated by userspace */
+       strncpy(manufacturer_string, "Android", sizeof(manufacturer_string)-1);
+       strncpy(product_string, "Android", sizeof(product_string) - 1);
+       strncpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1);
+
+       id = usb_string_id(cdev);
+       if (id < 0)
+               return id;
+       strings_dev[STRING_SERIAL_IDX].id = id;
+       device_desc.iSerialNumber = id;
+
+       usb_gadget_set_selfpowered(gadget);
+       dev->cdev = cdev;
+
+       return 0;
+}
+
+static int android_usb_unbind(struct usb_composite_dev *cdev)
+{
+       struct android_dev *dev = _android_dev;
+
+       cancel_work_sync(&dev->work);
+       android_cleanup_functions(dev->functions);
+       return 0;
+}
+
+/* HACK: android needs to override setup for accessory to work */
+static int (*composite_setup_func)(struct usb_gadget *gadget,
+                                  const struct usb_ctrlrequest *c);
+
+static int
+android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
+{
+       struct android_dev              *dev = _android_dev;
+       struct usb_composite_dev        *cdev = get_gadget_data(gadget);
+       struct usb_request              *req = cdev->req;
+       struct android_usb_function     *f;
+       int value = -EOPNOTSUPP;
+       unsigned long flags;
+
+       req->zero = 0;
+       req->length = 0;
+       gadget->ep0->driver_data = cdev;
+
+       list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
+               if (f->ctrlrequest) {
+                       value = f->ctrlrequest(f, cdev, c);
+                       if (value >= 0)
+                               break;
+               }
+       }
+
+       /* Special case the accessory function.
+        * It needs to handle control requests before it is enabled.
+        */
+       if (value < 0)
+               value = acc_ctrlrequest(cdev, c);
+
+       if (value < 0)
+               value = composite_setup_func(gadget, c);
+
+       spin_lock_irqsave(&cdev->lock, flags);
+       if (!dev->connected) {
+               dev->connected = 1;
+               schedule_work(&dev->work);
+       } else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+                                               cdev->config) {
+               schedule_work(&dev->work);
+       }
+       spin_unlock_irqrestore(&cdev->lock, flags);
+
+       return value;
+}
+
+static void android_disconnect(struct usb_composite_dev *cdev)
+{
+       struct android_dev *dev = _android_dev;
+
+       /*
+        * Accessory HID support can be active while the accessory
+        * function is not actually enabled, so we need to inform it
+        * when we are disconnected.
+        */
+       acc_disconnect();
+
+       dev->connected = 0;
+       schedule_work(&dev->work);
+}
+
+static struct usb_composite_driver android_usb_driver = {
+       .name           = "android_usb",
+       .dev            = &device_desc,
+       .strings        = dev_strings,
+       .bind           = android_bind,
+       .unbind         = android_usb_unbind,
+       .disconnect     = android_disconnect,
+       .max_speed      = USB_SPEED_HIGH,
+};
+
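+/*
+ * Create the "android0" device in the android_usb class and attach the
+ * attribute files that userspace uses to configure and enable the
+ * gadget.
+ */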
+static int android_create_device(struct android_dev *dev)
+{
+       struct device_attribute **attrs = android_usb_attributes;
+       struct device_attribute *attr;
+       int err;
+
+       dev->dev = device_create(android_class, NULL,
+                                       MKDEV(0, 0), NULL, "android0");
+       if (IS_ERR(dev->dev))
+               return PTR_ERR(dev->dev);
+
+       dev_set_drvdata(dev->dev, dev);
+
+       while ((attr = *attrs++)) {
+               err = device_create_file(dev->dev, attr);
+               if (err) {
+                       device_destroy(android_class, dev->dev->devt);
+                       return err;
+               }
+       }
+       return 0;
+}
+
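+/*
+ * Module init: create the class and the android0 device first so the
+ * sysfs interface exists before the composite driver is probed; the
+ * setup-handler swap must come after usb_composite_probe() has filled
+ * in gadget_driver.setup.
+ */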
+static int __init init(void)
+{
+       struct android_dev *dev;
+       int err;
+
+       android_class = class_create(THIS_MODULE, "android_usb");
+       if (IS_ERR(android_class))
+               return PTR_ERR(android_class);
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_dev;
+       }
+
+       dev->disable_depth = 1;
+       dev->functions = supported_functions;
+       INIT_LIST_HEAD(&dev->enabled_functions);
+       INIT_WORK(&dev->work, android_work);
+       mutex_init(&dev->mutex);
+
+       err = android_create_device(dev);
+       if (err) {
+               pr_err("%s: failed to create android device %d", __func__, err);
+               goto err_create;
+       }
+
+       _android_dev = dev;
+
+       err = usb_composite_probe(&android_usb_driver);
+       if (err) {
+               pr_err("%s: failed to probe driver %d", __func__, err);
+               goto err_probe;
+       }
+
+       /* HACK: exchange composite's setup with ours */
+       composite_setup_func = android_usb_driver.gadget_driver.setup;
+       android_usb_driver.gadget_driver.setup = android_setup;
+
+       return 0;
+
+err_probe:
+       device_destroy(android_class, dev->dev->devt);
+err_create:
+       kfree(dev);
+err_dev:
+       class_destroy(android_class);
+       return err;
+}
+late_initcall(init);
+
+static void __exit cleanup(void)
+{
+       usb_composite_unregister(&android_usb_driver);
+       class_destroy(android_class);
+       kfree(_android_dev);
+       _android_dev = NULL;
+}
+module_exit(cleanup);
index 44a292b750129009f65ee92656f3a4be75378010..2dd57853de67360511a32946a93785165ebee46f 100644 (file)
@@ -812,7 +812,7 @@ done:
 }
 EXPORT_SYMBOL_GPL(usb_add_config);
 
-static void remove_config(struct usb_composite_dev *cdev,
+static void unbind_config(struct usb_composite_dev *cdev,
                              struct usb_configuration *config)
 {
        while (!list_empty(&config->functions)) {
@@ -827,7 +827,6 @@ static void remove_config(struct usb_composite_dev *cdev,
                        /* may free memory for "f" */
                }
        }
-       list_del(&config->list);
        if (config->unbind) {
                DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
                config->unbind(config);
@@ -854,9 +853,11 @@ void usb_remove_config(struct usb_composite_dev *cdev,
        if (cdev->config == config)
                reset_config(cdev);
 
+       list_del(&config->list);
+
        spin_unlock_irqrestore(&cdev->lock, flags);
 
-       remove_config(cdev, config);
+       unbind_config(cdev, config);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1525,7 +1526,8 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
                struct usb_configuration        *c;
                c = list_first_entry(&cdev->configs,
                                struct usb_configuration, list);
-               remove_config(cdev, c);
+               list_del(&c->list);
+               unbind_config(cdev, c);
        }
        if (cdev->driver->unbind && unbind_driver)
                cdev->driver->unbind(cdev);
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
new file mode 100644 (file)
index 0000000..53e50b5
--- /dev/null
@@ -0,0 +1,1204 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+       struct list_head        list;
+       struct hid_device *hid;
+       struct acc_dev *dev;
+       /* accessory defined ID */
+       int id;
+       /* HID report descriptor */
+       u8 *report_desc;
+       /* length of HID report descriptor */
+       int report_desc_len;
+       /* number of bytes of report_desc we have received so far */
+       int report_desc_offset;
+};
+
+struct acc_dev {
+       struct usb_function function;
+       struct usb_composite_dev *cdev;
+       spinlock_t lock;
+
+       struct usb_ep *ep_in;
+       struct usb_ep *ep_out;
+
+       /* set to 1 when we connect */
+       unsigned int online:1;
+       /* Set to 1 when we disconnect.
+        * Not cleared until our file is closed.
+        */
+       unsigned int disconnected:1;
+
+       /* strings sent by the host */
+       char manufacturer[ACC_STRING_SIZE];
+       char model[ACC_STRING_SIZE];
+       char description[ACC_STRING_SIZE];
+       char version[ACC_STRING_SIZE];
+       char uri[ACC_STRING_SIZE];
+       char serial[ACC_STRING_SIZE];
+
+       /* for acc_complete_set_string */
+       int string_index;
+
+       /* set to 1 if we have a pending start request */
+       int start_requested;
+
+       int audio_mode;
+
+       /* synchronize access to our device file */
+       atomic_t open_excl;
+
+       struct list_head tx_idle;
+
+       wait_queue_head_t read_wq;
+       wait_queue_head_t write_wq;
+       struct usb_request *rx_req[RX_REQ_MAX];
+       int rx_done;
+
+       /* delayed work for handling ACCESSORY_START */
+       struct delayed_work start_work;
+
+       /* worker for registering and unregistering hid devices */
+       struct work_struct hid_work;
+
+       /* list of active HID devices */
+       struct list_head        hid_list;
+
+       /* list of new HID devices to register */
+       struct list_head        new_hid_list;
+
+       /* list of dead HID devices to unregister */
+       struct list_head        dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 2,
+       .bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+       .bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+       .bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+       (struct usb_descriptor_header *) &acc_interface_desc,
+       (struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+       (struct usb_descriptor_header *) &acc_interface_desc,
+       (struct usb_descriptor_header *) &acc_highspeed_in_desc,
+       (struct usb_descriptor_header *) &acc_highspeed_out_desc,
+       NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+       [INTERFACE_STRING_INDEX].s      = "Android Accessory Interface",
+       {  },   /* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+       .language               = 0x0409,       /* en-US */
+       .strings                = acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+       &acc_string_table,
+       NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+       return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+       if (!req)
+               return NULL;
+
+       /* now allocate buffers for the requests */
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+
+       return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+               struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_add_tail(&req->list, head);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (list_empty(head)) {
+               req = NULL;
+       } else {
+               req = list_first_entry(head, struct usb_request, list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+       dev->online = 0;
+       dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev *dev = _acc_dev;
+
+       if (req->status == -ESHUTDOWN) {
+               pr_debug("acc_complete_in set disconnected");
+               acc_set_disconnected(dev);
+       }
+
+       req_put(dev, &dev->tx_idle, req);
+
+       wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev *dev = _acc_dev;
+
+       dev->rx_done = 1;
+       if (req->status == -ESHUTDOWN) {
+               pr_debug("acc_complete_out set disconnected");
+               acc_set_disconnected(dev);
+       }
+
+       wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+       struct acc_dev  *dev = ep->driver_data;
+       char *string_dest = NULL;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_set_string, err %d\n", req->status);
+               return;
+       }
+
+       switch (dev->string_index) {
+       case ACCESSORY_STRING_MANUFACTURER:
+               string_dest = dev->manufacturer;
+               break;
+       case ACCESSORY_STRING_MODEL:
+               string_dest = dev->model;
+               break;
+       case ACCESSORY_STRING_DESCRIPTION:
+               string_dest = dev->description;
+               break;
+       case ACCESSORY_STRING_VERSION:
+               string_dest = dev->version;
+               break;
+       case ACCESSORY_STRING_URI:
+               string_dest = dev->uri;
+               break;
+       case ACCESSORY_STRING_SERIAL:
+               string_dest = dev->serial;
+               break;
+       }
+       if (string_dest) {
+               unsigned long flags;
+
+               if (length >= ACC_STRING_SIZE)
+                       length = ACC_STRING_SIZE - 1;
+
+               spin_lock_irqsave(&dev->lock, flags);
+               memcpy(string_dest, req->buf, length);
+               /* ensure zero termination */
+               string_dest[length] = 0;
+               spin_unlock_irqrestore(&dev->lock, flags);
+       } else {
+               pr_err("unknown accessory string index %d\n",
+                       dev->string_index);
+       }
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       struct acc_dev *dev = hid->dev;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_set_hid_report_desc, err %d\n",
+                       req->status);
+               return;
+       }
+
+       memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+       hid->report_desc_offset += length;
+       if (hid->report_desc_offset == hid->report_desc_len) {
+               /* After we have received the entire report descriptor
+                * we schedule work to initialize the HID device
+                */
+               schedule_work(&dev->hid_work);
+       }
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       struct acc_hid_dev *hid = req->context;
+       int length = req->actual;
+
+       if (req->status != 0) {
+               pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+               return;
+       }
+
+       hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+       struct acc_hid_dev *hdev = hid->driver_data;
+
+       return hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+       return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+       .parse = acc_hid_parse,
+       .start = acc_hid_start,
+       .stop = acc_hid_stop,
+       .open = acc_hid_open,
+       .close = acc_hid_close,
+};
+
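+/*
+ * Allocate bookkeeping for one accessory HID device. Called with
+ * dev->lock held from the ep0 handler, hence GFP_ATOMIC.
+ */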
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+               int id, int desc_len)
+{
+       struct acc_hid_dev *hdev;
+
+       hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+       if (!hdev)
+               return NULL;
+       hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+       if (!hdev->report_desc) {
+               kfree(hdev);
+               return NULL;
+       }
+       hdev->dev = dev;
+       hdev->id = id;
+       hdev->report_desc_len = desc_len;
+
+       return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+       struct acc_hid_dev *hid;
+
+       list_for_each_entry(hid, list, list) {
+               if (hid->id == id)
+                       return hid;
+       }
+       return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       /* report descriptor length must be > 0 */
+       if (desc_length <= 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       /* replace HID if one already exists with this ID */
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (hid)
+               list_move(&hid->list, &dev->dead_hid_list);
+
+       hid = acc_hid_new(dev, id, desc_length);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -ENOMEM;
+       }
+
+       list_add(&hid->list, &dev->new_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* schedule work to register the HID device */
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+       struct acc_hid_dev *hid;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       hid = acc_hid_get(&dev->hid_list, id);
+       if (!hid)
+               hid = acc_hid_get(&dev->new_hid_list, id);
+       if (!hid) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -EINVAL;
+       }
+
+       list_move(&hid->list, &dev->dead_hid_list);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+       return 0;
+}
+
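+/*
+ * Claim one bulk-in and one bulk-out endpoint from the gadget and
+ * pre-allocate the fixed pools of requests used by acc_read() and
+ * acc_write().
+ */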
+static int create_bulk_endpoints(struct acc_dev *dev,
+                               struct usb_endpoint_descriptor *in_desc,
+                               struct usb_endpoint_descriptor *out_desc)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       struct usb_ep *ep;
+       int i;
+
+       DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+       ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_in = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_out = ep;
+
+       /* now allocate requests for our endpoints */
+       for (i = 0; i < TX_REQ_MAX; i++) {
+               req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = acc_complete_in;
+               req_put(dev, &dev->tx_idle, req);
+       }
+       for (i = 0; i < RX_REQ_MAX; i++) {
+               req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = acc_complete_out;
+               dev->rx_req[i] = req;
+       }
+
+       return 0;
+
+fail:
+       pr_err("acc_bind() could not allocate requests\n");
+       while ((req = req_get(dev, &dev->tx_idle)))
+               acc_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               acc_request_free(dev->rx_req[i], dev->ep_out);
+       return -ENOMEM;
+}
+
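+/*
+ * Read path: block until the function is online, queue a single
+ * bulk-out request and wait for its completion, then copy the payload
+ * to userspace. Zero-length packets are thrown back and the read is
+ * retried.
+ */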
+static ssize_t acc_read(struct file *fp, char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct acc_dev *dev = fp->private_data;
+       struct usb_request *req;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret = 0;
+
+       pr_debug("acc_read(%zu)\n", count);
+
+       if (dev->disconnected) {
+               pr_debug("acc_read disconnected");
+               return -ENODEV;
+       }
+
+       if (count > BULK_BUFFER_SIZE)
+               count = BULK_BUFFER_SIZE;
+
+       /* we will block until we're online */
+       pr_debug("acc_read: waiting for online\n");
+       ret = wait_event_interruptible(dev->read_wq, dev->online);
+       if (ret < 0) {
+               r = ret;
+               goto done;
+       }
+
+       if (dev->rx_done) {
+               /* last request was cancelled - try to retrieve its data */
+               req = dev->rx_req[0];
+               goto copy_data;
+       }
+
+requeue_req:
+       /* queue a request */
+       req = dev->rx_req[0];
+       req->length = count;
+       dev->rx_done = 0;
+       ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+       if (ret < 0) {
+               r = -EIO;
+               goto done;
+       } else {
+               pr_debug("rx %p queue\n", req);
+       }
+
+       /* wait for a request to complete */
+       ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+       if (ret < 0) {
+               r = ret;
+               ret = usb_ep_dequeue(dev->ep_out, req);
+               if (ret != 0) {
+                       /* Cancelling failed; data may already have been
+                        * received and will be retrieved by the next read.
+                        */
+                       pr_debug("acc_read: cancelling failed %d\n", ret);
+               }
+               goto done;
+       }
+
+copy_data:
+       dev->rx_done = 0;
+       if (dev->online) {
+               /* If we got a 0-len packet, throw it back and try again. */
+               if (req->actual == 0)
+                       goto requeue_req;
+
+               pr_debug("rx %p %u\n", req, req->actual);
+               xfer = (req->actual < count) ? req->actual : count;
+               r = xfer;
+               if (copy_to_user(buf, req->buf, xfer))
+                       r = -EFAULT;
+       } else
+               r = -EIO;
+
+done:
+       pr_debug("acc_read returning %zd\n", r);
+       return r;
+}
+
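+/*
+ * Write path: wait for an idle tx request (or for the function to go
+ * offline), copy up to BULK_BUFFER_SIZE bytes from userspace into it
+ * and queue it on the bulk-in endpoint, looping until the whole user
+ * buffer has been queued.
+ */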
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct acc_dev *dev = fp->private_data;
+       struct usb_request *req = NULL;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret;
+
+       pr_debug("acc_write(%zu)\n", count);
+
+       if (!dev->online || dev->disconnected) {
+               pr_debug("acc_write disconnected or not online");
+               return -ENODEV;
+       }
+
+       while (count > 0) {
+               if (!dev->online) {
+                       pr_debug("acc_write dev->error\n");
+                       r = -EIO;
+                       break;
+               }
+
+               /* get an idle tx request to use */
+               req = NULL;
+               ret = wait_event_interruptible(dev->write_wq,
+                       ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > BULK_BUFFER_SIZE)
+                       xfer = BULK_BUFFER_SIZE;
+               else
+                       xfer = count;
+               if (copy_from_user(req->buf, buf, xfer)) {
+                       r = -EFAULT;
+                       break;
+               }
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       pr_debug("acc_write: xfer error %d\n", ret);
+                       r = -EIO;
+                       break;
+               }
+
+               buf += xfer;
+               count -= xfer;
+
+               /* clear this so we don't try to free it on error exit */
+               req = NULL;
+       }
+
+       if (req)
+               req_put(dev, &dev->tx_idle, req);
+
+       pr_debug("acc_write returning %zd\n", r);
+       return r;
+}
+
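+/*
+ * ioctls return the identification strings sent by the host along with
+ * the start-requested and audio-mode state.
+ */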
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+       struct acc_dev *dev = fp->private_data;
+       char *src = NULL;
+       int ret;
+
+       switch (code) {
+       case ACCESSORY_GET_STRING_MANUFACTURER:
+               src = dev->manufacturer;
+               break;
+       case ACCESSORY_GET_STRING_MODEL:
+               src = dev->model;
+               break;
+       case ACCESSORY_GET_STRING_DESCRIPTION:
+               src = dev->description;
+               break;
+       case ACCESSORY_GET_STRING_VERSION:
+               src = dev->version;
+               break;
+       case ACCESSORY_GET_STRING_URI:
+               src = dev->uri;
+               break;
+       case ACCESSORY_GET_STRING_SERIAL:
+               src = dev->serial;
+               break;
+       case ACCESSORY_IS_START_REQUESTED:
+               return dev->start_requested;
+       case ACCESSORY_GET_AUDIO_MODE:
+               return dev->audio_mode;
+       }
+       if (!src)
+               return -EINVAL;
+
+       ret = strlen(src) + 1;
+       if (copy_to_user((void __user *)value, src, ret))
+               ret = -EFAULT;
+       return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "acc_open\n");
+       if (atomic_xchg(&_acc_dev->open_excl, 1))
+               return -EBUSY;
+
+       _acc_dev->disconnected = 0;
+       fp->private_data = _acc_dev;
+       return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "acc_release\n");
+
+       WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+       _acc_dev->disconnected = 0;
+       return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+       .owner = THIS_MODULE,
+       .read = acc_read,
+       .write = acc_write,
+       .unlocked_ioctl = acc_ioctl,
+       .open = acc_open,
+       .release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+               const struct hid_device_id *id)
+{
+       int ret;
+
+       ret = hid_parse(hdev);
+       if (ret)
+               return ret;
+       return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "usb_accessory",
+       .fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+       { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+       { }
+};
+
+static struct hid_driver acc_hid_driver = {
+       .name = "USB accessory",
+       .id_table = acc_hid_table,
+       .probe = acc_hid_probe,
+};
+
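+/*
+ * Handle the Android Open Accessory vendor requests on ep0: OUT
+ * requests deliver identification strings, HID registration and
+ * reports, and the start command; the single IN request returns
+ * PROTOCOL_VERSION and resets state left over from a previous
+ * accessory session.
+ */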
+static int acc_ctrlrequest(struct usb_composite_dev *cdev,
+                               const struct usb_ctrlrequest *ctrl)
+{
+       struct acc_dev  *dev = _acc_dev;
+       int     value = -EOPNOTSUPP;
+       struct acc_hid_dev *hid;
+       int offset;
+       u8 b_requestType = ctrl->bRequestType;
+       u8 b_request = ctrl->bRequest;
+       u16     w_index = le16_to_cpu(ctrl->wIndex);
+       u16     w_value = le16_to_cpu(ctrl->wValue);
+       u16     w_length = le16_to_cpu(ctrl->wLength);
+       unsigned long flags;
+
+/*
+       printk(KERN_INFO "acc_ctrlrequest "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       b_requestType, b_request,
+                       w_value, w_index, w_length);
+*/
+
+       if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+               if (b_request == ACCESSORY_START) {
+                       dev->start_requested = 1;
+                       schedule_delayed_work(
+                               &dev->start_work, msecs_to_jiffies(10));
+                       value = 0;
+               } else if (b_request == ACCESSORY_SEND_STRING) {
+                       dev->string_index = w_index;
+                       cdev->gadget->ep0->driver_data = dev;
+                       cdev->req->complete = acc_complete_set_string;
+                       value = w_length;
+               } else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+                               w_index == 0 && w_length == 0) {
+                       dev->audio_mode = w_value;
+                       value = 0;
+               } else if (b_request == ACCESSORY_REGISTER_HID) {
+                       value = acc_register_hid(dev, w_value, w_index);
+               } else if (b_request == ACCESSORY_UNREGISTER_HID) {
+                       value = acc_unregister_hid(dev, w_value);
+               } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->new_hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       offset = w_index;
+                       if (offset != hid->report_desc_offset
+                               || offset + w_length > hid->report_desc_len) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_set_hid_report_desc;
+                       value = w_length;
+               } else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       hid = acc_hid_get(&dev->hid_list, w_value);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       if (!hid) {
+                               value = -EINVAL;
+                               goto err;
+                       }
+                       cdev->req->context = hid;
+                       cdev->req->complete = acc_complete_send_hid_event;
+                       value = w_length;
+               }
+       } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+               if (b_request == ACCESSORY_GET_PROTOCOL) {
+                       *((__le16 *)cdev->req->buf) =
+                                       cpu_to_le16(PROTOCOL_VERSION);
+                       value = sizeof(u16);
+
+                       /* clear any string left over from a previous session */
+                       memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+                       memset(dev->model, 0, sizeof(dev->model));
+                       memset(dev->description, 0, sizeof(dev->description));
+                       memset(dev->version, 0, sizeof(dev->version));
+                       memset(dev->uri, 0, sizeof(dev->uri));
+                       memset(dev->serial, 0, sizeof(dev->serial));
+                       dev->start_requested = 0;
+                       dev->audio_mode = 0;
+               }
+       }
+
+       if (value >= 0) {
+               cdev->req->zero = 0;
+               cdev->req->length = value;
+               value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+               if (value < 0)
+                       ERROR(cdev, "%s setup response queue error\n",
+                               __func__);
+       }
+
+err:
+       if (value == -EOPNOTSUPP)
+               VDBG(cdev,
+                       "unknown class-specific control req "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+       return value;
+}
+
+static int
+acc_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct acc_dev  *dev = func_to_dev(f);
+       int                     id;
+       int                     ret;
+
+       DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+       ret = hid_register_driver(&acc_hid_driver);
+       if (ret)
+               return ret;
+
+       dev->start_requested = 0;
+
+       /* allocate interface ID(s) */
+       id = usb_interface_id(c, f);
+       if (id < 0)
+               return id;
+       acc_interface_desc.bInterfaceNumber = id;
+
+       /* allocate endpoints */
+       ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+                       &acc_fullspeed_out_desc);
+       if (ret)
+               return ret;
+
+       /* support high speed hardware */
+       if (gadget_is_dualspeed(c->cdev->gadget)) {
+               acc_highspeed_in_desc.bEndpointAddress =
+                       acc_fullspeed_in_desc.bEndpointAddress;
+               acc_highspeed_out_desc.bEndpointAddress =
+                       acc_fullspeed_out_desc.bEndpointAddress;
+       }
+
+       DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+                       gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+                       f->name, dev->ep_in->name, dev->ep_out->name);
+       return 0;
+}
+
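+/*
+ * Move every active and pending HID device to the dead list and let
+ * acc_hid_work() unregister and free them.
+ */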
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+       struct acc_hid_dev *hid;
+       struct list_head *entry, *temp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_for_each_safe(entry, temp, &dev->hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               list_add(&hid->list, &dev->dead_hid_list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+       hid_unregister_driver(&acc_hid_driver);
+       kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_request *req;
+       int i;
+
+       while ((req = req_get(dev, &dev->tx_idle)))
+               acc_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               acc_request_free(dev->rx_req[i], dev->ep_out);
+
+       acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+       char *envp[2] = { "ACCESSORY=START", NULL };
+
+       kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+       struct hid_device *hid;
+       int ret;
+
+       hid = hid_allocate_device();
+       if (IS_ERR(hid))
+               return PTR_ERR(hid);
+
+       hid->ll_driver = &acc_hid_ll_driver;
+       hid->dev.parent = acc_device.this_device;
+
+       hid->bus = BUS_USB;
+       hid->vendor = HID_ANY_ID;
+       hid->product = HID_ANY_ID;
+       hid->driver_data = hdev;
+       ret = hid_add_device(hid);
+       if (ret) {
+               pr_err("can't add hid device: %d\n", ret);
+               hid_destroy_device(hid);
+               return ret;
+       }
+
+       hdev->hid = hid;
+       return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+       kfree(hid->report_desc);
+       kfree(hid);
+}
+
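+/*
+ * HID housekeeping, run from the shared workqueue: devices on
+ * new_hid_list whose report descriptors are complete are registered
+ * and moved to hid_list; everything on dead_hid_list is unregistered
+ * and freed. The lists are only spliced under dev->lock, which is
+ * dropped around hid_add_device() since it may sleep.
+ */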
+static void acc_hid_work(struct work_struct *data)
+{
+       struct acc_dev *dev = _acc_dev;
+       struct list_head        *entry, *temp;
+       struct acc_hid_dev *hid;
+       struct list_head        new_list, dead_list;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&new_list);
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* copy hids that are ready for initialization to new_list */
+       list_for_each_safe(entry, temp, &dev->new_hid_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (hid->report_desc_offset == hid->report_desc_len)
+                       list_move(&hid->list, &new_list);
+       }
+
+       if (list_empty(&dev->dead_hid_list)) {
+               INIT_LIST_HEAD(&dead_list);
+       } else {
+               /* move all of dev->dead_hid_list to dead_list */
+               dead_list.prev = dev->dead_hid_list.prev;
+               dead_list.next = dev->dead_hid_list.next;
+               dead_list.next->prev = &dead_list;
+               dead_list.prev->next = &dead_list;
+               INIT_LIST_HEAD(&dev->dead_hid_list);
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* register new HID devices */
+       list_for_each_safe(entry, temp, &new_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               if (acc_hid_init(hid)) {
+                       pr_err("can't add HID device %p\n", hid);
+                       acc_hid_delete(hid);
+               } else {
+                       spin_lock_irqsave(&dev->lock, flags);
+                       list_move(&hid->list, &dev->hid_list);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+               }
+       }
+
+       /* remove dead HID devices */
+       list_for_each_safe(entry, temp, &dead_list) {
+               hid = list_entry(entry, struct acc_hid_dev, list);
+               list_del(&hid->list);
+               if (hid->hid)
+                       hid_destroy_device(hid->hid);
+               acc_hid_delete(hid);
+       }
+}
+
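+/*
+ * set_alt: configure and enable both bulk endpoints for the current
+ * connection speed, mark the function online and wake any reader
+ * blocked in acc_read().
+ */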
+static int acc_function_set_alt(struct usb_function *f,
+               unsigned intf, unsigned alt)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_out);
+       if (ret) {
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+
+       dev->online = 1;
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+       return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+       struct acc_dev  *dev = func_to_dev(f);
+       struct usb_composite_dev        *cdev = dev->cdev;
+
+       DBG(cdev, "acc_function_disable\n");
+       acc_set_disconnected(dev);
+       usb_ep_disable(dev->ep_in);
+       usb_ep_disable(dev->ep_out);
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+
+       VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_bind_config(struct usb_configuration *c)
+{
+       struct acc_dev *dev = _acc_dev;
+       int ret;
+
+       printk(KERN_INFO "acc_bind_config\n");
+
+       /* allocate a string ID for our interface */
+       if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+               ret = usb_string_id(c->cdev);
+               if (ret < 0)
+                       return ret;
+               acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+               acc_interface_desc.iInterface = ret;
+       }
+
+       dev->cdev = c->cdev;
+       dev->function.name = "accessory";
+       dev->function.strings = acc_strings;
+       dev->function.fs_descriptors = fs_acc_descs;
+       dev->function.hs_descriptors = hs_acc_descs;
+       dev->function.bind = acc_function_bind;
+       dev->function.unbind = acc_function_unbind;
+       dev->function.set_alt = acc_function_set_alt;
+       dev->function.disable = acc_function_disable;
+
+       return usb_add_function(c, &dev->function);
+}
+
+static int acc_setup(void)
+{
+       struct acc_dev *dev;
+       int ret;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->lock);
+       init_waitqueue_head(&dev->read_wq);
+       init_waitqueue_head(&dev->write_wq);
+       atomic_set(&dev->open_excl, 0);
+       INIT_LIST_HEAD(&dev->tx_idle);
+       INIT_LIST_HEAD(&dev->hid_list);
+       INIT_LIST_HEAD(&dev->new_hid_list);
+       INIT_LIST_HEAD(&dev->dead_hid_list);
+       INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+       INIT_WORK(&dev->hid_work, acc_hid_work);
+
+       /* _acc_dev must be set before calling usb_gadget_register_driver */
+       _acc_dev = dev;
+
+       ret = misc_register(&acc_device);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       _acc_dev = NULL;
+       kfree(dev);
+       pr_err("USB accessory gadget driver failed to initialize\n");
+       return ret;
+}
+
+static void acc_disconnect(void)
+{
+       /* unregister all HID devices if USB is disconnected */
+       kill_all_hid_devices(_acc_dev);
+}
+
+static void acc_cleanup(void)
+{
+       misc_deregister(&acc_device);
+       kfree(_acc_dev);
+       _acc_dev = NULL;
+}
diff --git a/drivers/usb/gadget/f_audio_source.c b/drivers/usb/gadget/f_audio_source.c
new file mode 100644 (file)
index 0000000..56dcf21
--- /dev/null
@@ -0,0 +1,828 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 384
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE     0
+#define AUDIO_AS_INTERFACE     1
+#define AUDIO_NUM_INTERFACES   2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH        UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+       + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+       + UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+       .bLength =              UAC_DT_AC_HEADER_LENGTH,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_HEADER,
+       .bcdADC =               __constant_cpu_to_le16(0x0100),
+       .wTotalLength =         __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+       .bInCollection =        AUDIO_NUM_INTERFACES,
+       .baInterfaceNr = {
+               [0] =           AUDIO_AC_INTERFACE,
+               [1] =           AUDIO_AS_INTERFACE,
+       }
+};
+
+#define INPUT_TERMINAL_ID      1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+       .bLength =              UAC_DT_INPUT_TERMINAL_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_INPUT_TERMINAL,
+       .bTerminalID =          INPUT_TERMINAL_ID,
+       .wTerminalType =        UAC_INPUT_TERMINAL_MICROPHONE,
+       .bAssocTerminal =       0,
+       .wChannelConfig =       0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID                2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+       .bLength                = UAC_DT_FEATURE_UNIT_SIZE(0),
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_FEATURE_UNIT,
+       .bUnitID                = FEATURE_UNIT_ID,
+       .bSourceID              = INPUT_TERMINAL_ID,
+       .bControlSize           = 2,
+};
+
+#define OUTPUT_TERMINAL_ID     3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+       .bLength                = UAC_DT_OUTPUT_TERMINAL_SIZE,
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = UAC_OUTPUT_TERMINAL,
+       .bTerminalID            = OUTPUT_TERMINAL_ID,
+       .wTerminalType          = UAC_TERMINAL_STREAMING,
+       .bAssocTerminal         = FEATURE_UNIT_ID,
+       .bSourceID              = FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    0,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    1,
+       .bNumEndpoints =        1,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+       .bLength =              UAC_DT_AS_HEADER_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_AS_GENERAL,
+       .bTerminalLink =        INPUT_TERMINAL_ID,
+       .bDelay =               1,
+       .wFormatTag =           UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+       .bLength =              UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   UAC_FORMAT_TYPE,
+       .bFormatType =          UAC_FORMAT_TYPE_I,
+       .bSubframeSize =        2,
+       .bBitResolution =       16,
+       .bSamFreqType =         1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_SYNC_SYNC
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+       .bInterval =            1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+       .bLength =              UAC_ISO_ENDPOINT_DESC_SIZE,
+       .bDescriptorType =      USB_DT_CS_ENDPOINT,
+       .bDescriptorSubtype =   UAC_EP_GENERAL,
+       .bmAttributes =         1,
+       .bLockDelayUnits =      1,
+       .wLockDelay =           __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&hs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&fs_as_in_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_in_desc,
+       NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+       .info =                 SNDRV_PCM_INFO_MMAP |
+                               SNDRV_PCM_INFO_MMAP_VALID |
+                               SNDRV_PCM_INFO_BATCH |
+                               SNDRV_PCM_INFO_INTERLEAVED |
+                               SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+       .formats                = SNDRV_PCM_FMTBIT_S16_LE,
+       .channels_min           = 2,
+       .channels_max           = 2,
+       .rate_min               = SAMPLE_RATE,
+       .rate_max               = SAMPLE_RATE,
+
+       .buffer_bytes_max =     1024 * 1024,
+       .period_bytes_min =     64,
+       .period_bytes_max =     512 * 1024,
+       .periods_min =          2,
+       .periods_max =          1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+       int     card;
+       int     device;
+};
+
+struct audio_dev {
+       struct usb_function             func;
+       struct snd_card                 *card;
+       struct snd_pcm                  *pcm;
+       struct snd_pcm_substream *substream;
+
+       struct list_head                idle_reqs;
+       struct usb_ep                   *in_ep;
+
+       spinlock_t                      lock;
+
+       /* beginning, end and current position in our buffer */
+       void                            *buffer_start;
+       void                            *buffer_end;
+       void                            *buffer_pos;
+
+       /* byte size of a "period" */
+       unsigned int                    period;
+       /* bytes sent since last call to snd_pcm_period_elapsed */
+       unsigned int                    period_offset;
+       /* time we started playing */
+       ktime_t                         start_time;
+       /* number of frames sent since start_time */
+       s64                             frames_sent;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+       return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+       if (!req)
+               return NULL;
+
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+       req->length = buffer_size;
+       return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       list_add_tail(&req->list, &audio->idle_reqs);
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       if (list_empty(&audio->idle_reqs)) {
+               req = NULL;
+       } else {
+               req = list_first_entry(&audio->idle_reqs, struct usb_request,
+                               list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&audio->lock, flags);
+       return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
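+/*
+ * Pacing: frames owed = elapsed_ms * SAMPLE_RATE / 1000 minus frames
+ * already sent. If we fall more than 10 ms behind we drop the backlog
+ * to one millisecond rather than bursting to catch up, and we always
+ * queue at least one millisecond of audio to keep the pipeline moving.
+ */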
+static void audio_send(struct audio_dev *audio)
+{
+       struct snd_pcm_runtime *runtime;
+       struct usb_request *req;
+       int length, length1, length2, ret;
+       s64 msecs;
+       s64 frames;
+       ktime_t now;
+
+       /* audio->substream will be null if we have been closed */
+       if (!audio->substream)
+               return;
+       /* audio->buffer_pos will be null if we have been stopped */
+       if (!audio->buffer_pos)
+               return;
+
+       runtime = audio->substream->runtime;
+
+       /* compute number of frames to send */
+       now = ktime_get();
+       msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+       do_div(msecs, 1000000);
+       frames = msecs * SAMPLE_RATE;
+       do_div(frames, 1000);
+
+       /* Readjust our frames_sent if we fall too far behind.
+        * If we get too far behind it is better to drop some frames than
+        * to keep sending data too fast in an attempt to catch up.
+        */
+       if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+               audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+       frames -= audio->frames_sent;
+
+       /* We need to send something to keep the pipeline going */
+       if (frames <= 0)
+               frames = FRAMES_PER_MSEC;
+
+       while (frames > 0) {
+               req = audio_req_get(audio);
+               if (!req)
+                       break;
+
+               length = frames_to_bytes(runtime, frames);
+               if (length > IN_EP_MAX_PACKET_SIZE)
+                       length = IN_EP_MAX_PACKET_SIZE;
+
+               if (audio->buffer_pos + length > audio->buffer_end)
+                       length1 = audio->buffer_end - audio->buffer_pos;
+               else
+                       length1 = length;
+               memcpy(req->buf, audio->buffer_pos, length1);
+               if (length1 < length) {
+                       /* Wrap around and copy remaining length
+                        * at beginning of buffer.
+                        */
+                       length2 = length - length1;
+                       memcpy(req->buf + length1, audio->buffer_start,
+                                       length2);
+                       audio->buffer_pos = audio->buffer_start + length2;
+               } else {
+                       audio->buffer_pos += length1;
+                       if (audio->buffer_pos >= audio->buffer_end)
+                               audio->buffer_pos = audio->buffer_start;
+               }
+
+               req->length = length;
+               ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+               if (ret < 0) {
+                       pr_err("usb_ep_queue failed ret: %d\n", ret);
+                       audio_req_put(audio, req);
+                       break;
+               }
+
+               frames -= bytes_to_frames(runtime, length);
+               audio->frames_sent += bytes_to_frames(runtime, length);
+       }
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       /* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+       struct audio_dev *audio = req->context;
+
+       pr_debug("audio_data_complete req->status %d req->actual %d\n",
+               req->status, req->actual);
+
+       audio_req_put(audio, req);
+
+       if (!audio->buffer_start || req->status)
+               return;
+
+       audio->period_offset += req->actual;
+       if (audio->period_offset >= audio->period) {
+               snd_pcm_period_elapsed(audio->substream);
+               audio->period_offset = 0;
+       }
+       audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       int value = -EOPNOTSUPP;
+       u16 ep = le16_to_cpu(ctrl->wIndex);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       switch (ctrl->bRequest) {
+       case UAC_SET_CUR:
+       case UAC_SET_MIN:
+       case UAC_SET_MAX:
+       case UAC_SET_RES:
+               value = len;
+               break;
+       default:
+               break;
+       }
+
+       return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int value = -EOPNOTSUPP;
+       u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u8 *buf = cdev->req->buf;
+
+       pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       if (w_value == (UAC_EP_CS_ATTR_SAMPLE_RATE << 8)) {
+               switch (ctrl->bRequest) {
+               case UAC_GET_CUR:
+               case UAC_GET_MIN:
+               case UAC_GET_MAX:
+               case UAC_GET_RES:
+                       /* return our sample rate */
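+                       /* UAC 1.0 encodes the sampling frequency as a
+                        * 3-byte little-endian value in Hz.
+                        */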
+                       buf[0] = (u8)SAMPLE_RATE;
+                       buf[1] = (u8)(SAMPLE_RATE >> 8);
+                       buf[2] = (u8)(SAMPLE_RATE >> 16);
+                       value = 3;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request *req = cdev->req;
+       int value = -EOPNOTSUPP;
+       u16 w_index = le16_to_cpu(ctrl->wIndex);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u16 w_length = le16_to_cpu(ctrl->wLength);
+
+       /* composite driver infrastructure handles everything; interface
+        * activation uses set_alt().
+        */
+       switch (ctrl->bRequestType) {
+       case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_set_endpoint_req(f, ctrl);
+               break;
+
+       case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+               value = audio_get_endpoint_req(f, ctrl);
+               break;
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = 0;
+               req->length = value;
+               req->complete = audio_control_complete;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       pr_err("audio response queue error %d\n", value);
+       }
+
+       /* device either stalls (value < 0) or reports success */
+       return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+       struct audio_dev *audio = func_to_audio(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+       if (ret)
+               return ret;
+
+       usb_ep_enable(audio->in_ep);
+       return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+       struct audio_dev        *audio = func_to_audio(f);
+
+       pr_debug("audio_disable\n");
+       usb_ep_disable(audio->in_ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+       u8 *sam_freq;
+       int rate;
+
+       /* Set channel numbers */
+       input_terminal_desc.bNrChannels = 2;
+       as_type_i_desc.bNrChannels = 2;
+
+       /* Set sample rates */
+       rate = SAMPLE_RATE;
+       sam_freq = as_type_i_desc.tSamFreq[0];
+       memcpy(sam_freq, &rate, 3);
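+       /* tSamFreq is a 3-byte little-endian field; copying the low three
+        * bytes of the int directly assumes a little-endian CPU.
+        */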
+}
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct audio_dev *audio = func_to_audio(f);
+       int status;
+       struct usb_ep *ep;
+       struct usb_request *req;
+       int i;
+
+       audio_build_desc(audio);
+
+       /* allocate instance-specific interface IDs, and patch descriptors */
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       ac_interface_desc.bInterfaceNumber = status;
+
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       as_interface_alt_0_desc.bInterfaceNumber = status;
+       as_interface_alt_1_desc.bInterfaceNumber = status;
+
+       status = -ENODEV;
+
+       /* allocate our endpoint */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+       if (!ep)
+               goto fail;
+       audio->in_ep = ep;
+       ep->driver_data = audio; /* claim */
+
+       if (gadget_is_dualspeed(c->cdev->gadget))
+               hs_as_in_ep_desc.bEndpointAddress =
+                       fs_as_in_ep_desc.bEndpointAddress;
+
+       f->fs_descriptors = fs_audio_desc;
+       f->hs_descriptors = hs_audio_desc;
+
+       for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+               req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+               if (req) {
+                       req->context = audio;
+                       req->complete = audio_data_complete;
+                       audio_req_put(audio, req);
+               } else {
+                       status = -ENOMEM;
+               }
+       }
+
+fail:
+       return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct audio_dev *audio = func_to_audio(f);
+       struct usb_request *req;
+
+       while ((req = audio_req_get(audio)))
+               audio_request_free(req, audio->in_ep);
+
+       snd_card_free_when_closed(audio->card);
+       audio->card = NULL;
+       audio->pcm = NULL;
+       audio->substream = NULL;
+       audio->in_ep = NULL;
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+       audio->start_time = ktime_get();
+       audio->frames_sent = 0;
+       audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->buffer_start = NULL;
+       audio->buffer_end = NULL;
+       audio->buffer_pos = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = substream->private_data;
+
+       runtime->private_data = audio;
+       runtime->hw = audio_hw_info;
+       snd_pcm_limit_hw_rates(runtime);
+       runtime->hw.channels_max = 2;
+
+       audio->substream = substream;
+       return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+       struct audio_dev *audio = substream->private_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&audio->lock, flags);
+       audio->substream = NULL;
+       spin_unlock_irqrestore(&audio->lock, flags);
+
+       return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+                               struct snd_pcm_hw_params *params)
+{
+       unsigned int channels = params_channels(params);
+       unsigned int rate = params_rate(params);
+
+       if (rate != SAMPLE_RATE)
+               return -EINVAL;
+       if (channels != 2)
+               return -EINVAL;
+
+       return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+               params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+
+       audio->period = snd_pcm_lib_period_bytes(substream);
+       audio->period_offset = 0;
+       audio->buffer_start = runtime->dma_area;
+       audio->buffer_end = audio->buffer_start
+               + snd_pcm_lib_buffer_bytes(substream);
+       audio->buffer_pos = audio->buffer_start;
+
+       return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct audio_dev *audio = runtime->private_data;
+       ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+       /* return offset of next frame to fill in our buffer */
+       return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+                                       int cmd)
+{
+       struct audio_dev *audio = substream->runtime->private_data;
+       int ret = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+               audio_pcm_playback_start(audio);
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+               audio_pcm_playback_stop(audio);
+               break;
+
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static struct audio_dev _audio_dev = {
+       .func = {
+               .name = "audio_source",
+               .bind = audio_bind,
+               .unbind = audio_unbind,
+               .set_alt = audio_set_alt,
+               .setup = audio_setup,
+               .disable = audio_disable,
+       },
+       .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+       .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+       .open           = audio_pcm_open,
+       .close          = audio_pcm_close,
+       .ioctl          = snd_pcm_lib_ioctl,
+       .hw_params      = audio_pcm_hw_params,
+       .hw_free        = audio_pcm_hw_free,
+       .prepare        = audio_pcm_prepare,
+       .trigger        = audio_pcm_playback_trigger,
+       .pointer        = audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+               struct audio_source_config *config)
+{
+       struct audio_dev *audio;
+       struct snd_card *card;
+       struct snd_pcm *pcm;
+       int err;
+
+       config->card = -1;
+       config->device = -1;
+
+       audio = &_audio_dev;
+
+       err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+                       THIS_MODULE, 0, &card);
+       if (err)
+               return err;
+
+       snd_card_set_dev(card, &c->cdev->gadget->dev);
+
+       err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+       if (err)
+               goto pcm_fail;
+       pcm->private_data = audio;
+       pcm->info_flags = 0;
+       audio->pcm = pcm;
+
+       strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+       snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+                               NULL, 0, 64 * 1024);
+
+       strlcpy(card->driver, "audio_source", sizeof(card->driver));
+       strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+       strlcpy(card->longname, "USB accessory audio source",
+               sizeof(card->longname));
+
+       err = snd_card_register(card);
+       if (err)
+               goto register_fail;
+
+       err = usb_add_function(c, &audio->func);
+       if (err)
+               goto add_fail;
+
+       config->card = pcm->card->number;
+       config->device = pcm->device;
+       audio->card = card;
+       return 0;
+
+add_fail:
+register_fail:
+pcm_fail:
+       snd_card_free(card);
+       return err;
+}
index 84219f656051f41641c2afe192a60a232650b9d0..c36f660dcc2e67a6a614c9b37ae6b7d249495a2a 100644 (file)
@@ -1563,7 +1563,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
        do {
                struct usb_endpoint_descriptor *ds;
-               ds = ep->descs[ep->descs[1] ? 1 : 0];
+               int desc_idx = ffs->gadget->speed == USB_SPEED_HIGH ? 1 : 0;
+               ds = ep->descs[desc_idx];
+               if (!ds) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                ep->ep->driver_data = ep;
                ep->ep->desc = ds;
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
new file mode 100644 (file)
index 0000000..620aeaa
--- /dev/null
@@ -0,0 +1,1287 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+
+static const char mtp_shortname[] = "mtp_usb";
+
+struct mtp_dev {
+       struct usb_function function;
+       struct usb_composite_dev *cdev;
+       spinlock_t lock;
+
+       struct usb_ep *ep_in;
+       struct usb_ep *ep_out;
+       struct usb_ep *ep_intr;
+
+       int state;
+
+       /* synchronize access to our device file */
+       atomic_t open_excl;
+       /* to enforce only one ioctl at a time */
+       atomic_t ioctl_excl;
+
+       struct list_head tx_idle;
+       struct list_head intr_idle;
+
+       wait_queue_head_t read_wq;
+       wait_queue_head_t write_wq;
+       wait_queue_head_t intr_wq;
+       struct usb_request *rx_req[RX_REQ_MAX];
+       int rx_done;
+
+       /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+        * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+        */
+       struct workqueue_struct *wq;
+       struct work_struct send_file_work;
+       struct work_struct receive_file_work;
+       struct file *xfer_file;
+       loff_t xfer_file_offset;
+       int64_t xfer_file_length;
+       unsigned xfer_send_header;
+       uint16_t xfer_command;
+       uint32_t xfer_transaction_id;
+       int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 3,
+       .bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+       .bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+       .bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+       .bLength                = USB_DT_INTERFACE_SIZE,
+       .bDescriptorType        = USB_DT_INTERFACE,
+       .bInterfaceNumber       = 0,
+       .bNumEndpoints          = 3,
+       .bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+       .bInterfaceSubClass     = 1,
+       .bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_OUT,
+       .bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
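+/* Interrupt IN endpoint for MTP events. bInterval 6 gives a polling
+ * interval of 2^(6-1) microframes (4 ms) at high speed, or 6 ms at
+ * full speed.
+ */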
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+       .bLength                = USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType        = USB_DT_ENDPOINT,
+       .bEndpointAddress       = USB_DIR_IN,
+       .bmAttributes           = USB_ENDPOINT_XFER_INT,
+       .wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+       .bInterval              = 6,
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+       (struct usb_descriptor_header *) &mtp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+       (struct usb_descriptor_header *) &mtp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+       (struct usb_descriptor_header *) &ptp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+       (struct usb_descriptor_header *) &ptp_interface_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+       (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+       (struct usb_descriptor_header *) &mtp_intr_desc,
+       NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+       /* Naming interface "MTP" so libmtp will recognize us */
+       [INTERFACE_STRING_INDEX].s      = "MTP",
+       {  },   /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+       .language               = 0x0409,       /* en-US */
+       .strings                = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+       &mtp_string_table,
+       NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+       18, /* sizeof(mtp_os_string) */
+       USB_DT_STRING,
+       /* Signature field: "MSFT100" */
+       'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+       /* vendor code */
+       1,
+       /* padding */
+       0
+};
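+
+/* Hosts that support Microsoft OS descriptors read string descriptor
+ * 0xEE: the "MSFT100" signature plus the vendor code above tell the
+ * host to issue a vendor request with that code to fetch the extended
+ * configuration descriptor defined below.
+ */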
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+       __le32  dwLength;
+       __le16  bcdVersion;
+       __le16  wIndex;
+       __u8    bCount;
+       __u8    reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+       __u8    bFirstInterfaceNumber;
+       __u8    bInterfaceCount;
+       __u8    compatibleID[8];
+       __u8    subCompatibleID[8];
+       __u8    reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+       struct mtp_ext_config_desc_header       header;
+       struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+       .header = {
+               .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+               .bcdVersion = __constant_cpu_to_le16(0x0100),
+               .wIndex = __constant_cpu_to_le16(4),
+               .bCount = 1,    /* bCount is a __u8 */
+       },
+       .function = {
+               .bFirstInterfaceNumber = 0,
+               .bInterfaceCount = 1,
+               .compatibleID = { 'M', 'T', 'P' },
+       },
+};
+
+struct mtp_device_status {
+       __le16  wLength;
+       __le16  wCode;
+};
+
+struct mtp_data_header {
+       /* length of packet, including this header */
+       __le32  length;
+       /* container type (2 for data packet) */
+       __le16  type;
+       /* MTP command code */
+       __le16  command;
+       /* MTP transaction ID */
+       __le32  transaction_id;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+       return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+       struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+       if (!req)
+               return NULL;
+
+       /* now allocate buffers for the requests */
+       req->buf = kmalloc(buffer_size, GFP_KERNEL);
+       if (!req->buf) {
+               usb_ep_free_request(ep, req);
+               return NULL;
+       }
+
+       return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+       if (req) {
+               kfree(req->buf);
+               usb_ep_free_request(ep, req);
+       }
+}
+
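+/* Non-blocking "trylock" on an atomic counter: the first caller sees the
+ * count go 0 -> 1 and takes ownership; any concurrent caller undoes its
+ * increment and fails with -1.
+ */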
+static inline int mtp_lock(atomic_t *excl)
+{
+       if (atomic_inc_return(excl) == 1) {
+               return 0;
+       } else {
+               atomic_dec(excl);
+               return -1;
+       }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+       atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+               struct usb_request *req)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       list_add_tail(&req->list, head);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *mtp_req_get(struct mtp_dev *dev,
+               struct list_head *head)
+{
+       unsigned long flags;
+       struct usb_request *req;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (list_empty(head)) {
+               req = NULL;
+       } else {
+               req = list_first_entry(head, struct usb_request, list);
+               list_del(&req->list);
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       mtp_req_put(dev, &dev->tx_idle, req);
+
+       wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       dev->rx_done = 1;
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (req->status != 0)
+               dev->state = STATE_ERROR;
+
+       mtp_req_put(dev, &dev->intr_idle, req);
+
+       wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+                               struct usb_endpoint_descriptor *in_desc,
+                               struct usb_endpoint_descriptor *out_desc,
+                               struct usb_endpoint_descriptor *intr_desc)
+{
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       struct usb_ep *ep;
+       int i;
+
+       DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+       ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_in = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_out = ep;
+
+       ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+       if (!ep) {
+               DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+               return -ENODEV;
+       }
+       DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+       ep->driver_data = dev;          /* claim the endpoint */
+       dev->ep_intr = ep;
+
+       /* now allocate requests for our endpoints */
+       for (i = 0; i < TX_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_in;
+               mtp_req_put(dev, &dev->tx_idle, req);
+       }
+       for (i = 0; i < RX_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_out;
+               dev->rx_req[i] = req;
+       }
+       for (i = 0; i < INTR_REQ_MAX; i++) {
+               req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+               if (!req)
+                       goto fail;
+               req->complete = mtp_complete_intr;
+               mtp_req_put(dev, &dev->intr_idle, req);
+       }
+
+       return 0;
+
+fail:
+       printk(KERN_ERR "mtp_create_bulk_endpoints() could not allocate requests\n");
+       return -ENOMEM;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req;
+       ssize_t r = count;
+       unsigned xfer;
+       int ret = 0;
+
+       DBG(cdev, "mtp_read(%zu)\n", count);
+
+       if (count > MTP_BULK_BUFFER_SIZE)
+               return -EINVAL;
+
+       /* we will block until we're online */
+       DBG(cdev, "mtp_read: waiting for online state\n");
+       ret = wait_event_interruptible(dev->read_wq,
+               dev->state != STATE_OFFLINE);
+       if (ret < 0) {
+               r = ret;
+               goto done;
+       }
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED) {
+               /* report cancelation to userspace */
+               dev->state = STATE_READY;
+               spin_unlock_irq(&dev->lock);
+               return -ECANCELED;
+       }
+       dev->state = STATE_BUSY;
+       spin_unlock_irq(&dev->lock);
+
+requeue_req:
+       /* queue a request */
+       req = dev->rx_req[0];
+       req->length = count;
+       dev->rx_done = 0;
+       ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+       if (ret < 0) {
+               r = -EIO;
+               goto done;
+       } else {
+               DBG(cdev, "rx %p queue\n", req);
+       }
+
+       /* wait for a request to complete */
+       ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+       if (ret < 0) {
+               r = ret;
+               usb_ep_dequeue(dev->ep_out, req);
+               goto done;
+       }
+       if (dev->state == STATE_BUSY) {
+               /* If we got a 0-len packet, throw it back and try again. */
+               if (req->actual == 0)
+                       goto requeue_req;
+
+               DBG(cdev, "rx %p %d\n", req, req->actual);
+               xfer = (req->actual < count) ? req->actual : count;
+               r = xfer;
+               if (copy_to_user(buf, req->buf, xfer))
+                       r = -EFAULT;
+       } else {
+               r = -EIO;
+       }
+
+done:
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               r = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+
+       DBG(cdev, "mtp_read returning %zd\n", r);
+       return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+       size_t count, loff_t *pos)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req = NULL;
+       ssize_t r = count;
+       unsigned xfer;
+       int sendZLP = 0;
+       int ret;
+
+       DBG(cdev, "mtp_write(%zu)\n", count);
+
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED) {
+               /* report cancelation to userspace */
+               dev->state = STATE_READY;
+               spin_unlock_irq(&dev->lock);
+               return -ECANCELED;
+       }
+       if (dev->state == STATE_OFFLINE) {
+               spin_unlock_irq(&dev->lock);
+               return -ENODEV;
+       }
+       dev->state = STATE_BUSY;
+       spin_unlock_irq(&dev->lock);
+
+       /* we need to send a zero length packet to signal the end of transfer
+        * if the transfer size is aligned to a packet boundary.
+        */
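+       /* e.g. a 1024-byte transfer on a 512-byte high-speed bulk endpoint
+        * ends exactly on a packet boundary, so the host needs a ZLP to see
+        * the end of the data phase.
+        */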
+       if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+               sendZLP = 1;
+
+       while (count > 0 || sendZLP) {
+               /* so we exit after sending ZLP */
+               if (count == 0)
+                       sendZLP = 0;
+
+               if (dev->state != STATE_BUSY) {
+                       DBG(cdev, "mtp_write dev->error\n");
+                       r = -EIO;
+                       break;
+               }
+
+               /* get an idle tx request to use */
+               req = NULL;
+               ret = wait_event_interruptible(dev->write_wq,
+                       ((req = mtp_req_get(dev, &dev->tx_idle))
+                               || dev->state != STATE_BUSY));
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > MTP_BULK_BUFFER_SIZE)
+                       xfer = MTP_BULK_BUFFER_SIZE;
+               else
+                       xfer = count;
+               if (xfer && copy_from_user(req->buf, buf, xfer)) {
+                       r = -EFAULT;
+                       break;
+               }
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       DBG(cdev, "mtp_write: xfer error %d\n", ret);
+                       r = -EIO;
+                       break;
+               }
+
+               buf += xfer;
+               count -= xfer;
+
+               /* zero this so we don't try to free it on error exit */
+               req = NULL;
+       }
+
+       if (req)
+               mtp_req_put(dev, &dev->tx_idle, req);
+
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               r = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+
+       DBG(cdev, "mtp_write returning %zd\n", r);
+       return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+       struct mtp_dev *dev = container_of(data, struct mtp_dev,
+                                               send_file_work);
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *req = NULL;
+       struct mtp_data_header *header;
+       struct file *filp;
+       loff_t offset;
+       int64_t count;
+       int xfer, ret, hdr_size;
+       int r = 0;
+       int sendZLP = 0;
+
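+       /* dev->xfer_* are published by mtp_ioctl() with smp_wmb() before the
+        * work is queued; the smp_rmb() below pairs with that barrier, and
+        * the result is handed back the same way (smp_wmb() here, smp_rmb()
+        * in the ioctl after flush_workqueue()).
+        */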
+       /* read our parameters */
+       smp_rmb();
+       filp = dev->xfer_file;
+       offset = dev->xfer_file_offset;
+       count = dev->xfer_file_length;
+
+       DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+       if (dev->xfer_send_header) {
+               hdr_size = sizeof(struct mtp_data_header);
+               count += hdr_size;
+       } else {
+               hdr_size = 0;
+       }
+
+       /* we need to send a zero length packet to signal the end of transfer
+        * if the transfer size is aligned to a packet boundary.
+        */
+       if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+               sendZLP = 1;
+
+       while (count > 0 || sendZLP) {
+               /* so we exit after sending ZLP */
+               if (count == 0)
+                       sendZLP = 0;
+
+               /* get an idle tx request to use */
+               req = NULL;
+               ret = wait_event_interruptible(dev->write_wq,
+                       (req = mtp_req_get(dev, &dev->tx_idle))
+                       || dev->state != STATE_BUSY);
+               if (dev->state == STATE_CANCELED) {
+                       r = -ECANCELED;
+                       break;
+               }
+               if (!req) {
+                       r = ret;
+                       break;
+               }
+
+               if (count > MTP_BULK_BUFFER_SIZE)
+                       xfer = MTP_BULK_BUFFER_SIZE;
+               else
+                       xfer = count;
+
+               if (hdr_size) {
+                       /* prepend MTP data header */
+                       header = (struct mtp_data_header *)req->buf;
+                       header->length = __cpu_to_le32(count);
+                       header->type = __cpu_to_le16(2); /* data packet */
+                       header->command = __cpu_to_le16(dev->xfer_command);
+                       header->transaction_id =
+                                       __cpu_to_le32(dev->xfer_transaction_id);
+               }
+
+               ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+                                                               &offset);
+               if (ret < 0) {
+                       r = ret;
+                       break;
+               }
+               xfer = ret + hdr_size;
+               hdr_size = 0;
+
+               req->length = xfer;
+               ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+               if (ret < 0) {
+                       DBG(cdev, "send_file_work: xfer error %d\n", ret);
+                       dev->state = STATE_ERROR;
+                       r = -EIO;
+                       break;
+               }
+
+               count -= xfer;
+
+               /* zero this so we don't try to free it on error exit */
+               req = NULL;
+       }
+
+       if (req)
+               mtp_req_put(dev, &dev->tx_idle, req);
+
+       DBG(cdev, "send_file_work returning %d\n", r);
+       /* write the result */
+       dev->xfer_result = r;
+       smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+       struct mtp_dev *dev = container_of(data, struct mtp_dev,
+                                               receive_file_work);
+       struct usb_composite_dev *cdev = dev->cdev;
+       struct usb_request *read_req = NULL, *write_req = NULL;
+       struct file *filp;
+       loff_t offset;
+       int64_t count;
+       int ret, cur_buf = 0;
+       int r = 0;
+
+       /* read our parameters */
+       smp_rmb();
+       filp = dev->xfer_file;
+       offset = dev->xfer_file_offset;
+       count = dev->xfer_file_length;
+
+       DBG(cdev, "receive_file_work(%lld)\n", count);
+
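+       /* Ping-pong across the RX_REQ_MAX preallocated requests: the next
+        * USB OUT read is queued before the previous buffer is written out
+        * with vfs_write(), overlapping USB and file I/O.
+        */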
+       while (count > 0 || write_req) {
+               if (count > 0) {
+                       /* queue a request */
+                       read_req = dev->rx_req[cur_buf];
+                       cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+                       read_req->length = (count > MTP_BULK_BUFFER_SIZE
+                                       ? MTP_BULK_BUFFER_SIZE : count);
+                       dev->rx_done = 0;
+                       ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+                       if (ret < 0) {
+                               r = -EIO;
+                               dev->state = STATE_ERROR;
+                               break;
+                       }
+               }
+
+               if (write_req) {
+                       DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+                       ret = vfs_write(filp, write_req->buf, write_req->actual,
+                               &offset);
+                       DBG(cdev, "vfs_write %d\n", ret);
+                       if (ret != write_req->actual) {
+                               r = -EIO;
+                               dev->state = STATE_ERROR;
+                               break;
+                       }
+                       write_req = NULL;
+               }
+
+               if (read_req) {
+                       /* wait for our last read to complete */
+                       ret = wait_event_interruptible(dev->read_wq,
+                               dev->rx_done || dev->state != STATE_BUSY);
+                       if (dev->state == STATE_CANCELED) {
+                               r = -ECANCELED;
+                               if (!dev->rx_done)
+                                       usb_ep_dequeue(dev->ep_out, read_req);
+                               break;
+                       }
+                       /* if xfer_file_length is 0xFFFFFFFF, then we read until
+                        * we get a zero length packet
+                        */
+                       if (count != 0xFFFFFFFF)
+                               count -= read_req->actual;
+                       if (read_req->actual < read_req->length) {
+                               /*
+                                * short packet is used to signal EOF for
+                                * sizes > 4 gig
+                                */
+                               DBG(cdev, "got short packet\n");
+                               count = 0;
+                       }
+
+                       write_req = read_req;
+                       read_req = NULL;
+               }
+       }
+
+       DBG(cdev, "receive_file_work returning %d\n", r);
+       /* write the result */
+       dev->xfer_result = r;
+       smp_wmb();
+}
+
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+       struct usb_request *req = NULL;
+       int ret;
+       int length = event->length;
+
+       DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+       if (length < 0 || length > INTR_BUFFER_SIZE)
+               return -EINVAL;
+       if (dev->state == STATE_OFFLINE)
+               return -ENODEV;
+
+       ret = wait_event_interruptible_timeout(dev->intr_wq,
+                       (req = mtp_req_get(dev, &dev->intr_idle)),
+                       msecs_to_jiffies(1000));
+       if (!req)
+               return -ETIME;
+
+       if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+               mtp_req_put(dev, &dev->intr_idle, req);
+               return -EFAULT;
+       }
+       req->length = length;
+       ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+       if (ret)
+               mtp_req_put(dev, &dev->intr_idle, req);
+
+       return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+       struct mtp_dev *dev = fp->private_data;
+       struct file *filp = NULL;
+       int ret = -EINVAL;
+
+       if (mtp_lock(&dev->ioctl_excl))
+               return -EBUSY;
+
+       switch (code) {
+       case MTP_SEND_FILE:
+       case MTP_RECEIVE_FILE:
+       case MTP_SEND_FILE_WITH_HEADER:
+       {
+               struct mtp_file_range   mfr;
+               struct work_struct *work;
+
+               spin_lock_irq(&dev->lock);
+               if (dev->state == STATE_CANCELED) {
+                       /* report cancelation to userspace */
+                       dev->state = STATE_READY;
+                       spin_unlock_irq(&dev->lock);
+                       ret = -ECANCELED;
+                       goto out;
+               }
+               if (dev->state == STATE_OFFLINE) {
+                       spin_unlock_irq(&dev->lock);
+                       ret = -ENODEV;
+                       goto out;
+               }
+               dev->state = STATE_BUSY;
+               spin_unlock_irq(&dev->lock);
+
+               if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+                       ret = -EFAULT;
+                       goto fail;
+               }
+               /* hold a reference to the file while we are working with it */
+               filp = fget(mfr.fd);
+               if (!filp) {
+                       ret = -EBADF;
+                       goto fail;
+               }
+
+               /* write the parameters */
+               dev->xfer_file = filp;
+               dev->xfer_file_offset = mfr.offset;
+               dev->xfer_file_length = mfr.length;
+               smp_wmb();
+
+               if (code == MTP_SEND_FILE_WITH_HEADER) {
+                       work = &dev->send_file_work;
+                       dev->xfer_send_header = 1;
+                       dev->xfer_command = mfr.command;
+                       dev->xfer_transaction_id = mfr.transaction_id;
+               } else if (code == MTP_SEND_FILE) {
+                       work = &dev->send_file_work;
+                       dev->xfer_send_header = 0;
+               } else {
+                       work = &dev->receive_file_work;
+               }
+
+               /* We do the file transfer on a work queue so it will run
+                * in kernel context, which is necessary for vfs_read and
+                * vfs_write to use our buffers in the kernel address space.
+                */
+               queue_work(dev->wq, work);
+               /* wait for operation to complete */
+               flush_workqueue(dev->wq);
+               fput(filp);
+
+               /* read the result */
+               smp_rmb();
+               ret = dev->xfer_result;
+               break;
+       }
+       case MTP_SEND_EVENT:
+       {
+               struct mtp_event        event;
+               /* return here so we don't change dev->state below,
+                * which would interfere with bulk transfer state.
+                */
+               if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+                       ret = -EFAULT;
+               else
+                       ret = mtp_send_event(dev, &event);
+               goto out;
+       }
+       }
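+       /* unhandled codes fall through to the fail path with ret == -EINVAL */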
+
+fail:
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_CANCELED)
+               ret = -ECANCELED;
+       else if (dev->state != STATE_OFFLINE)
+               dev->state = STATE_READY;
+       spin_unlock_irq(&dev->lock);
+out:
+       mtp_unlock(&dev->ioctl_excl);
+       DBG(dev->cdev, "ioctl returning %d\n", ret);
+       return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "mtp_open\n");
+       if (mtp_lock(&_mtp_dev->open_excl))
+               return -EBUSY;
+
+       /* clear any error condition */
+       if (_mtp_dev->state != STATE_OFFLINE)
+               _mtp_dev->state = STATE_READY;
+
+       fp->private_data = _mtp_dev;
+       return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+       printk(KERN_INFO "mtp_release\n");
+
+       mtp_unlock(&_mtp_dev->open_excl);
+       return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+       .owner = THIS_MODULE,
+       .read = mtp_read,
+       .write = mtp_write,
+       .unlocked_ioctl = mtp_ioctl,
+       .open = mtp_open,
+       .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = mtp_shortname,
+       .fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+                               const struct usb_ctrlrequest *ctrl)
+{
+       struct mtp_dev *dev = _mtp_dev;
+       int     value = -EOPNOTSUPP;
+       u16     w_index = le16_to_cpu(ctrl->wIndex);
+       u16     w_value = le16_to_cpu(ctrl->wValue);
+       u16     w_length = le16_to_cpu(ctrl->wLength);
+       unsigned long   flags;
+
+       VDBG(cdev, "mtp_ctrlrequest "
+                       "%02x.%02x v%04x i%04x l%u\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+
+       /* Handle MTP OS string */
+       if (ctrl->bRequestType ==
+                       (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+                       && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+                       && (w_value >> 8) == USB_DT_STRING
+                       && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+               value = (w_length < sizeof(mtp_os_string)
+                               ? w_length : sizeof(mtp_os_string));
+               memcpy(cdev->req->buf, mtp_os_string, value);
+       } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+               /* Handle MTP OS descriptor */
+               DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+                       ctrl->bRequest, w_index, w_value, w_length);
+
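+               /* Per the MS OS 1.0 descriptor spec, wIndex 4 selects the
+                * extended compat ID descriptor (which carries the "MTP"
+                * compatible ID) and wIndex 5 the extended properties;
+                * bRequest 1 matches the vendor code in mtp_os_string.
+                */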
+               if (ctrl->bRequest == 1
+                               && (ctrl->bRequestType & USB_DIR_IN)
+                               && (w_index == 4 || w_index == 5)) {
+                       value = (w_length < sizeof(mtp_ext_config_desc) ?
+                                       w_length : sizeof(mtp_ext_config_desc));
+                       memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+               }
+       } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+               DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+                       ctrl->bRequest, w_index, w_value, w_length);
+
+               if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+                               && w_value == 0) {
+                       DBG(cdev, "MTP_REQ_CANCEL\n");
+
+                       spin_lock_irqsave(&dev->lock, flags);
+                       if (dev->state == STATE_BUSY) {
+                               dev->state = STATE_CANCELED;
+                               wake_up(&dev->read_wq);
+                               wake_up(&dev->write_wq);
+                       }
+                       spin_unlock_irqrestore(&dev->lock, flags);
+
+                       /* We need to queue a request to read the remaining
+                        * bytes, but we don't actually need to look at
+                        * the contents.
+                        */
+                       value = w_length;
+               } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+                               && w_index == 0 && w_value == 0) {
+                       struct mtp_device_status *status = cdev->req->buf;
+                       status->wLength =
+                               __constant_cpu_to_le16(sizeof(*status));
+
+                       DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+                       spin_lock_irqsave(&dev->lock, flags);
+                       /* device status is "busy" until we report
+                        * the cancelation to userspace
+                        */
+                       if (dev->state == STATE_CANCELED)
+                               status->wCode =
+                                       __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+                       else
+                               status->wCode =
+                                       __cpu_to_le16(MTP_RESPONSE_OK);
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       value = sizeof(*status);
+               }
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               int rc;
+               cdev->req->zero = value < w_length;
+               cdev->req->length = value;
+               rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+               if (rc < 0)
+                       ERROR(cdev, "%s: response queue error\n", __func__);
+       }
+       return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct mtp_dev  *dev = func_to_mtp(f);
+       int                     id;
+       int                     ret;
+
+       dev->cdev = cdev;
+       DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+       /* allocate interface ID(s) */
+       id = usb_interface_id(c, f);
+       if (id < 0)
+               return id;
+       mtp_interface_desc.bInterfaceNumber = id;
+
+       /* allocate endpoints */
+       ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+                       &mtp_fullspeed_out_desc, &mtp_intr_desc);
+       if (ret)
+               return ret;
+
+       /* support high speed hardware */
+       if (gadget_is_dualspeed(c->cdev->gadget)) {
+               mtp_highspeed_in_desc.bEndpointAddress =
+                       mtp_fullspeed_in_desc.bEndpointAddress;
+               mtp_highspeed_out_desc.bEndpointAddress =
+                       mtp_fullspeed_out_desc.bEndpointAddress;
+       }
+
+       DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+                       gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+                       f->name, dev->ep_in->name, dev->ep_out->name);
+       return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_request *req;
+       int i;
+
+       while ((req = mtp_req_get(dev, &dev->tx_idle)))
+               mtp_request_free(req, dev->ep_in);
+       for (i = 0; i < RX_REQ_MAX; i++)
+               mtp_request_free(dev->rx_req[i], dev->ep_out);
+       while ((req = mtp_req_get(dev, &dev->intr_idle)))
+               mtp_request_free(req, dev->ep_intr);
+       dev->state = STATE_OFFLINE;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+               unsigned intf, unsigned alt)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       int ret;
+
+       DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_in);
+       if (ret)
+               return ret;
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_out);
+       if (ret) {
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+
+       ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+       if (ret)
+               return ret;
+
+       ret = usb_ep_enable(dev->ep_intr);
+       if (ret) {
+               usb_ep_disable(dev->ep_out);
+               usb_ep_disable(dev->ep_in);
+               return ret;
+       }
+       dev->state = STATE_READY;
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+       return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+       struct mtp_dev  *dev = func_to_mtp(f);
+       struct usb_composite_dev        *cdev = dev->cdev;
+
+       DBG(cdev, "mtp_function_disable\n");
+       dev->state = STATE_OFFLINE;
+       usb_ep_disable(dev->ep_in);
+       usb_ep_disable(dev->ep_out);
+       usb_ep_disable(dev->ep_intr);
+
+       /* readers may be blocked waiting for us to go online */
+       wake_up(&dev->read_wq);
+
+       VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
+{
+       struct mtp_dev *dev = _mtp_dev;
+       int ret = 0;
+
+       printk(KERN_INFO "mtp_bind_config\n");
+
+       /* allocate a string ID for our interface */
+       if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+               ret = usb_string_id(c->cdev);
+               if (ret < 0)
+                       return ret;
+               mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+               mtp_interface_desc.iInterface = ret;
+       }
+
+       dev->cdev = c->cdev;
+       dev->function.name = "mtp";
+       dev->function.strings = mtp_strings;
+       if (ptp_config) {
+               dev->function.fs_descriptors = fs_ptp_descs;
+               dev->function.hs_descriptors = hs_ptp_descs;
+       } else {
+               dev->function.fs_descriptors = fs_mtp_descs;
+               dev->function.hs_descriptors = hs_mtp_descs;
+       }
+       dev->function.bind = mtp_function_bind;
+       dev->function.unbind = mtp_function_unbind;
+       dev->function.set_alt = mtp_function_set_alt;
+       dev->function.disable = mtp_function_disable;
+
+       return usb_add_function(c, &dev->function);
+}
+
+static int mtp_setup(void)
+{
+       struct mtp_dev *dev;
+       int ret;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       spin_lock_init(&dev->lock);
+       init_waitqueue_head(&dev->read_wq);
+       init_waitqueue_head(&dev->write_wq);
+       init_waitqueue_head(&dev->intr_wq);
+       atomic_set(&dev->open_excl, 0);
+       atomic_set(&dev->ioctl_excl, 0);
+       INIT_LIST_HEAD(&dev->tx_idle);
+       INIT_LIST_HEAD(&dev->intr_idle);
+
+       dev->wq = create_singlethread_workqueue("f_mtp");
+       if (!dev->wq) {
+               ret = -ENOMEM;
+               goto err1;
+       }
+       INIT_WORK(&dev->send_file_work, send_file_work);
+       INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+       _mtp_dev = dev;
+
+       ret = misc_register(&mtp_device);
+       if (ret)
+               goto err2;
+
+       return 0;
+
+err2:
+       destroy_workqueue(dev->wq);
+err1:
+       _mtp_dev = NULL;
+       kfree(dev);
+       printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+       return ret;
+}
+
+static void mtp_cleanup(void)
+{
+       struct mtp_dev *dev = _mtp_dev;
+
+       if (!dev)
+               return;
+
+       misc_deregister(&mtp_device);
+       destroy_workqueue(dev->wq);
+       _mtp_dev = NULL;
+       kfree(dev);
+}
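
For reference, a minimal sketch of how a composite gadget driver might wire up these entry points. The enclosing function names are hypothetical; only mtp_setup(), mtp_bind_config(), and mtp_cleanup() come from the code above.

static int mtp_function_init(void)
{
	/* allocates _mtp_dev and registers the mtp_usb misc device */
	return mtp_setup();
}

static int mtp_function_bind_config(struct usb_configuration *c)
{
	/* passing true instead selects the PTP descriptor set */
	return mtp_bind_config(c, false);
}

static void mtp_function_cleanup(void)
{
	mtp_cleanup();
}
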
index 36e8c44d8e5e2fb04222b86b4e9fbc4d3a493083..21c5ee2482d6c3f3181fa14a75f8c375d4f971d5 100644 (file)
@@ -821,12 +821,12 @@ rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
        if (!can_support_rndis(c) || !ethaddr)
                return -EINVAL;
 
-       if (rndis_string_defs[0].id == 0) {
-               /* ... and setup RNDIS itself */
-               status = rndis_init();
-               if (status < 0)
-                       return status;
+       /* setup RNDIS itself */
+       status = rndis_init();
+       if (status < 0)
+               return status;
 
+       if (rndis_string_defs[0].id == 0) {
                status = usb_string_ids_tab(c->cdev, rndis_string_defs);
                if (status)
                        return status;
index 1e4cfb05f70b9a0bba6a2fa421ef696d7649e8e8..693f0c24d51608955055a558fcc9d2e440495da3 100644 (file)
@@ -1127,11 +1127,15 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
 
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
+static bool rndis_initialized;
 
 int rndis_init(void)
 {
        u8 i;
 
+       if (rndis_initialized)
+               return 0;
+
        for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
                char name [20];
@@ -1158,6 +1162,7 @@ int rndis_init(void)
                INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
        }
 
+       rndis_initialized = true;
        return 0;
 }
 
@@ -1166,7 +1171,13 @@ void rndis_exit(void)
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
        u8 i;
        char name[20];
+#endif
 
+       if (!rndis_initialized)
+               return;
+       rndis_initialized = false;
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
        for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
                sprintf(name, NAME_TEMPLATE, i);
                remove_proc_entry(name, NULL);
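
Taken together with the f_rndis.c hunk above, rndis_init() is now called on every bind, and the rndis_initialized flag makes repeated init/exit calls harmless. The pattern, distilled into a hedged sketch with hypothetical names:

static bool my_initialized;

int my_init(void)
{
	if (my_initialized)
		return 0;
	/* one-time setup goes here */
	my_initialized = true;
	return 0;
}

void my_exit(void)
{
	if (!my_initialized)
		return;
	my_initialized = false;
	/* teardown goes here */
}
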
index b369292d4b90cd0c5c66dd147434f5d8aadbd752..72068082e83c2b03d452ee808a2e262e28fd666c 100644 (file)
@@ -1126,6 +1126,7 @@ int gserial_alloc_line(unsigned char *line_num)
 
        tty_dev = tty_port_register_device(&ports[port_num].port->port,
                        gs_tty_driver, port_num, NULL);
+
        if (IS_ERR(tty_dev)) {
                struct gs_port  *port;
                pr_err("%s: failed to register tty for port %d, err %ld\n",
index 5514822114a575edc5dca1cf992d00ce4e0d74d0..afe9b9e50cc44ec9a62cfa54264ff35285277eae 100644 (file)
@@ -335,7 +335,15 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
                driver->unbind(udc->gadget);
                goto err1;
        }
-       usb_gadget_connect(udc->gadget);
+       /*
+        * HACK: The Android gadget driver disconnects the gadget
+        * on bind and expects the gadget to stay disconnected until
+        * it calls usb_gadget_connect when userspace is ready. The
+        * call to usb_gadget_connect below is removed to avoid
+        * enabling the pullup before userspace is ready.
+        *
+        * usb_gadget_connect(udc->gadget);
+        */
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
        return 0;
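
The other half of this handshake lives in the Android composite driver. A hedged sketch of the connect that happens once userspace signals readiness; the structure and function names below are hypothetical, only usb_gadget_connect() is the real API:

/* called when userspace writes 1 to the gadget's enable attribute */
static void android_enable(struct android_usb_dev *adev)
{
	/* functions are configured, so it is safe to enable the pullup */
	usb_gadget_connect(adev->cdev->gadget);
}
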
index be41733a5eac27d59591a5d966e5a97a559083c4..0dd0da0d0ac528f86326be551e44b4296c8a1f21 100644 (file)
@@ -16,6 +16,14 @@ menuconfig USB_PHY
          If you're not sure if this applies to you, it probably doesn't;
          say N here.
 
+config USB_OTG_WAKELOCK
+       bool "Hold a wakelock when USB connected"
+       depends on WAKELOCK
+       select USB_OTG_UTILS
+       help
+         Select this to automatically hold a wakelock when USB is
+         connected, preventing suspend.
+
 if USB_PHY
 
 #
index a9169cb1e6fcca8d85856b4414bb36d89df1239d..a0a6cbad88064b1c0decbb6bfd952ba1282d1333 100644 (file)
@@ -5,6 +5,7 @@
 ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
 
 obj-$(CONFIG_USB_PHY)                  += phy.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)         += otg-wakelock.o
 
 # transceiver drivers, keep the list sorted
 
diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c
new file mode 100644 (file)
index 0000000..479376b
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME    2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+       char name[40];
+       struct wake_lock wakelock;
+       bool held;
+};
+
+/*
+ * VBUS present lock.  Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+       if (!lock->held) {
+               wake_lock(&lock->wakelock);
+               lock->held = true;
+       }
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+       wake_lock_timeout(&lock->wakelock,
+                         msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+       lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+       if (lock->held) {
+               wake_unlock(&lock->wakelock);
+               lock->held = false;
+       }
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+       if (!enabled) {
+               otgwl_drop(&vbus_lock);
+               spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+               return;
+       }
+
+       switch (event) {
+       case USB_EVENT_VBUS:
+       case USB_EVENT_ENUMERATED:
+               otgwl_hold(&vbus_lock);
+               break;
+
+       case USB_EVENT_NONE:
+       case USB_EVENT_ID:
+       case USB_EVENT_CHARGER:
+               otgwl_temporary_hold(&vbus_lock);
+               break;
+
+       default:
+               break;
+       }
+
+       spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+                                  unsigned long event, void *unused)
+{
+       otgwl_handle_event(event);
+       return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+       int rv = param_set_bool(val, kp);
+
+       if (rv)
+               return rv;
+
+       if (otgwl_xceiv)
+               otgwl_handle_event(otgwl_xceiv->last_event);
+
+       return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+       .set = set_enabled,
+       .get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+       int ret;
+       struct usb_phy *phy;
+
+       phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+       if (IS_ERR(phy)) {
+               pr_err("%s: No USB transceiver found\n", __func__);
+               return PTR_ERR(phy);
+       }
+       otgwl_xceiv = phy;
+
+       snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+                dev_name(otgwl_xceiv->dev));
+       wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+                      vbus_lock.name);
+
+       otgwl_nb.notifier_call = otgwl_otg_notifications;
+       ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+       if (ret) {
+               pr_err("%s: usb_register_notifier on transceiver %s"
+                      " failed\n", __func__,
+                      dev_name(otgwl_xceiv->dev));
+               otgwl_xceiv = NULL;
+               wake_lock_destroy(&vbus_lock.wakelock);
+               return ret;
+       }
+
+       otgwl_handle_event(otgwl_xceiv->last_event);
+       return ret;
+}
+
+late_initcall(otg_wakelock_init);
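
otg-wakelock only consumes transceiver events; the PHY driver produces them through the usb_phy notifier head that usb_register_notifier() attaches to. A sketch of the producer side, where my_phy_read_vbus() is a hypothetical helper (atomic_notifier_call_chain() and the USB_EVENT_* values are the stock kernel API):

static irqreturn_t my_phy_vbus_irq(int irq, void *data)
{
	struct usb_phy *phy = data;
	bool vbus = my_phy_read_vbus(phy);

	/* otgwl_otg_notifications() above runs from this chain */
	phy->last_event = vbus ? USB_EVENT_VBUS : USB_EVENT_NONE;
	atomic_notifier_call_chain(&phy->notifier, phy->last_event, NULL);
	return IRQ_HANDLED;
}
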
index 29a5121ce7fda2e6a127eabb36ba958681b49724..a3279c7def71b948fe68094a7fea161bba09123a 100644 (file)
@@ -2496,6 +2496,7 @@ source "drivers/video/omap2/Kconfig"
 source "drivers/video/exynos/Kconfig"
 source "drivers/video/mmp/Kconfig"
 source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
 
 if VT
        source "drivers/video/console/Kconfig"
index 33869eea4981580a03e85e758df7faf2586fcec6..3adbd32eb091aa6e3e54fb947e292deee54b24b8 100644 (file)
@@ -12,6 +12,7 @@ fb-y                              := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
                                      modedb.o fbcvt.o
 fb-objs                           := $(fb-y)
 
+obj-$(CONFIG_ADF)                += adf/
 obj-$(CONFIG_VT)                 += console/
 obj-$(CONFIG_LOGO)               += logo/
 obj-y                            += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644 (file)
index 0000000..33858b7
--- /dev/null
@@ -0,0 +1,14 @@
+menuconfig ADF
+       depends on SYNC
+       depends on DMA_SHARED_BUFFER
+       tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+       depends on ADF
+       depends on FB
+       tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+       depends on ADF
+       depends on HAVE_MEMBLOCK
+       tristate "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644 (file)
index 0000000..78d0915
--- /dev/null
@@ -0,0 +1,15 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf.o \
+       adf_client.o \
+       adf_fops.o \
+       adf_format.o \
+       adf_sysfs.o
+
+obj-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644 (file)
index 0000000..231881c
--- /dev/null
@@ -0,0 +1,1184 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ * adf_format_validate_yuv modified from framebuffer_check in
+ * drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+       /*
+        * sync_fence_wait() dumps debug information on timeout.  Experience
+        * has shown that if the pipeline gets stuck, a short timeout followed
+        * by a longer one provides useful information for debugging.
+        */
+       int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+       if (err >= 0)
+               return;
+
+       if (err == -ETIME)
+               err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+       if (err < 0)
+               dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+               if (buf->dma_bufs[i])
+                       dma_buf_put(buf->dma_bufs[i]);
+
+       if (buf->acquire_fence)
+               sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+               struct adf_buffer *buf)
+{
+       /* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+          uninitialized or partially-initialized, as long as it was
+          zeroed on allocation */
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+               if (mapping->sg_tables[i])
+                       dma_buf_unmap_attachment(mapping->attachments[i],
+                                       mapping->sg_tables[i], DMA_TO_DEVICE);
+               if (mapping->attachments[i])
+                       dma_buf_detach(buf->dma_bufs[i],
+                                       mapping->attachments[i]);
+       }
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+       size_t i;
+
+       if (post->state)
+               dev->ops->state_free(dev, post->state);
+
+       for (i = 0; i < post->config.n_bufs; i++) {
+               adf_buffer_mapping_cleanup(&post->config.mappings[i],
+                               &post->config.bufs[i]);
+               adf_buffer_cleanup(&post->config.bufs[i]);
+       }
+
+       kfree(post->config.custom_data);
+       kfree(post->config.mappings);
+       kfree(post->config.bufs);
+       kfree(post);
+}
+
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+       sw_sync_timeline_inc(dev->timeline, 1);
+#else
+       BUG();
+#endif
+}
+
+static void adf_post_work_func(struct kthread_work *work)
+{
+       struct adf_device *dev =
+                       container_of(work, struct adf_device, post_work);
+       struct adf_pending_post *post, *next;
+       struct list_head saved_list;
+
+       mutex_lock(&dev->post_lock);
+       memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+       list_replace_init(&dev->post_list, &saved_list);
+       mutex_unlock(&dev->post_lock);
+
+       list_for_each_entry_safe(post, next, &saved_list, head) {
+               int i;
+
+               for (i = 0; i < post->config.n_bufs; i++) {
+                       struct sync_fence *fence =
+                                       post->config.bufs[i].acquire_fence;
+                       if (fence)
+                               adf_fence_wait(dev, fence);
+               }
+
+               dev->ops->post(dev, &post->config, post->state);
+
+               if (dev->ops->advance_timeline)
+                       dev->ops->advance_timeline(dev, &post->config,
+                                       post->state);
+               else
+                       adf_sw_advance_timeline(dev);
+
+               list_del(&post->head);
+               if (dev->onscreen)
+                       adf_post_cleanup(dev, dev->onscreen);
+               dev->onscreen = post;
+       }
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+       list_del(&attachment->head);
+       kfree(attachment);
+}
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+               enum adf_event_type type)
+{
+       struct rb_root *root = &obj->event_refcount;
+       struct rb_node **new = &(root->rb_node);
+       struct rb_node *parent = NULL;
+       struct adf_event_refcount *refcount;
+
+       while (*new) {
+               refcount = container_of(*new, struct adf_event_refcount, node);
+               parent = *new;
+
+               if (refcount->type > type)
+                       new = &(*new)->rb_left;
+               else if (refcount->type < type)
+                       new = &(*new)->rb_right;
+               else
+                       return refcount;
+       }
+
+       refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+       if (!refcount)
+               return NULL;
+       refcount->type = type;
+
+       rb_link_node(&refcount->node, parent, new);
+       rb_insert_color(&refcount->node, root);
+       return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EOPNOTSUPP or -%EINVAL if the object does not
+ * support the requested event type, or -%ENOMEM if the refcount entry
+ * cannot be allocated.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+       struct adf_event_refcount *refcount;
+       int old_refcount;
+       int ret;
+
+       ret = adf_obj_check_supports_event(obj, type);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&obj->event_lock);
+
+       refcount = adf_obj_find_event_refcount(obj, type);
+       if (!refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       old_refcount = refcount->refcount++;
+
+       if (old_refcount == 0) {
+               obj->ops->set_event(obj, type, true);
+               trace_adf_event_enable(obj, type);
+       }
+
+done:
+       mutex_unlock(&obj->event_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EOPNOTSUPP or -%EINVAL if the object does not
+ * support the requested event type, -%ENOMEM if the refcount entry cannot
+ * be allocated, or -%EALREADY if the refcount is already 0.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+       struct adf_event_refcount *refcount;
+       int old_refcount;
+       int ret;
+
+       ret = adf_obj_check_supports_event(obj, type);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&obj->event_lock);
+
+       refcount = adf_obj_find_event_refcount(obj, type);
+       if (!refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       old_refcount = refcount->refcount--;
+
+       if (WARN_ON(old_refcount == 0)) {
+               refcount->refcount++;
+               ret = -EALREADY;
+       } else if (old_refcount == 1) {
+               obj->ops->set_event(obj, type, false);
+               trace_adf_event_disable(obj, type);
+       }
+
+done:
+       mutex_unlock(&obj->event_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
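
A short usage sketch of the refcounting: a client that needs vsync interrupts takes a reference, and the object's set_event() op fires only on the 0->1 and 1->0 transitions, so nested users are cheap:

	err = adf_event_get(&intf->base, ADF_EVENT_VSYNC);
	if (err < 0)
		return err;

	/* ... consume vsync events ... */

	adf_event_put(&intf->base, ADF_EVENT_VSYNC);
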
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+       ktime_t timestamp;
+       int ret;
+       unsigned long flags;
+
+       read_lock_irqsave(&intf->vsync_lock, flags);
+       timestamp = intf->vsync_timestamp;
+       read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       adf_vsync_get(intf);
+       if (timeout) {
+               ret = wait_event_interruptible_timeout(intf->vsync_wait,
+                               !ktime_equal(timestamp,
+                                               intf->vsync_timestamp),
+                               msecs_to_jiffies(timeout));
+               if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+                       ret = -ETIMEDOUT;
+       } else {
+               ret = wait_event_interruptible(intf->vsync_wait,
+                               !ktime_equal(timestamp,
+                                               intf->vsync_timestamp));
+       }
+       adf_vsync_put(intf);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
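
Usage sketch. Note the asymmetry in units: @timeout is passed through msecs_to_jiffies(), so callers supply milliseconds, while a positive return value is the remaining wait in jiffies:

	/* wait up to ~3 frames at 60 Hz for the next vsync */
	int ret = adf_vsync_wait(intf, 50);
	if (ret == -ETIMEDOUT)
		dev_warn(&intf->base.dev, "vsync timed out\n");
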
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+       struct adf_file *file;
+       unsigned long flags;
+
+       trace_adf_event(obj, event->type);
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+
+       list_for_each_entry(file, &obj->file_list, head)
+               if (test_bit(event->type, file->event_subscriptions))
+                       adf_file_queue_event(file, event);
+
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context.  It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+       if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+                       event->type == ADF_EVENT_HOTPLUG))
+               return -EINVAL;
+
+       adf_event_queue(obj, event);
+       return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+       unsigned long flags;
+       struct adf_vsync_event event;
+
+       write_lock_irqsave(&intf->vsync_lock, flags);
+       intf->vsync_timestamp = timestamp;
+       write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       wake_up_interruptible_all(&intf->vsync_wait);
+
+       event.base.type = ADF_EVENT_VSYNC;
+       event.base.length = sizeof(event);
+       event.timestamp = ktime_to_ns(timestamp);
+       adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       unsigned long flags;
+       struct adf_hotplug_event event;
+       struct drm_mode_modeinfo *old_modelist;
+
+       write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       old_modelist = intf->modelist;
+       intf->hotplug_detect = connected;
+       intf->modelist = modelist;
+       intf->n_modes = n_modes;
+       write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       kfree(old_modelist);
+
+       event.base.length = sizeof(event);
+       event.base.type = ADF_EVENT_HOTPLUG;
+       event.connected = connected;
+       adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success or error code (<0) on error.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       struct drm_mode_modeinfo *modelist_copy;
+
+       if (n_modes > ADF_MAX_MODES)
+               return -ENOMEM;
+
+       modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+                       GFP_KERNEL);
+       if (!modelist_copy)
+               return -ENOMEM;
+       memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+       adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+       return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
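
A sketch of a driver reporting a new connection. The modelist can live on the stack since adf_hotplug_notify_connected() copies it; the timings below are the standard 1080p60 values and the helpers are the ones defined later in this file:

	struct drm_mode_modeinfo mode = {
		.clock = 148500,	/* kHz */
		.hdisplay = 1920, .hsync_start = 2008,
		.hsync_end = 2052, .htotal = 2200,
		.vdisplay = 1080, .vsync_start = 1084,
		.vsync_end = 1089, .vtotal = 1125,
	};

	adf_modeinfo_set_name(&mode);		/* "1920x1080" */
	adf_modeinfo_set_vrefresh(&mode);	/* 60 */
	err = adf_hotplug_notify_connected(intf, &mode, 1);
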
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+       adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+               struct idr *idr, struct adf_device *parent,
+               const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+       int ret;
+
+       if (ops && ops->supports_event && !ops->set_event) {
+               pr_err("%s: %s implements supports_event but not set_event\n",
+                               __func__, adf_obj_type_str(type));
+               return -EINVAL;
+       }
+
+       ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+               return ret;
+       }
+       obj->id = ret;
+
+       vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+       obj->type = type;
+       obj->ops = ops;
+       obj->parent = parent;
+       mutex_init(&obj->event_lock);
+       obj->event_refcount = RB_ROOT;
+       spin_lock_init(&obj->file_lock);
+       INIT_LIST_HEAD(&obj->file_list);
+       return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+       struct rb_node *node = rb_first(&obj->event_refcount);
+
+       while (node) {
+               struct adf_event_refcount *refcount =
+                               container_of(node, struct adf_event_refcount,
+                                               node);
+               rb_erase(&refcount->node, &obj->event_refcount);
+               kfree(refcount);
+               node = rb_first(&obj->event_refcount);
+       }
+
+       mutex_destroy(&obj->event_lock);
+       idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+               const struct adf_device_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+
+       if (!ops->validate || !ops->post) {
+               pr_err("%s: device must implement validate and post\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (!ops->complete_fence && !ops->advance_timeline) {
+               if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+                       pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+                                       __func__);
+                       return -EINVAL;
+               }
+       } else if (!(ops->complete_fence && ops->advance_timeline)) {
+               pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       memset(dev, 0, sizeof(*dev));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+                       &ops->base, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       dev->dev = parent;
+       dev->ops = ops;
+       idr_init(&dev->overlay_engines);
+       idr_init(&dev->interfaces);
+       mutex_init(&dev->client_lock);
+       INIT_LIST_HEAD(&dev->post_list);
+       mutex_init(&dev->post_lock);
+       init_kthread_worker(&dev->post_worker);
+       INIT_LIST_HEAD(&dev->attached);
+       INIT_LIST_HEAD(&dev->attach_allowed);
+
+       dev->post_thread = kthread_run(kthread_worker_fn,
+                       &dev->post_worker, dev->base.name);
+       if (IS_ERR(dev->post_thread)) {
+               ret = PTR_ERR(dev->post_thread);
+               dev->post_thread = NULL;
+
+               pr_err("%s: failed to run config posting thread: %d\n",
+                               __func__, ret);
+               goto err;
+       }
+       init_kthread_work(&dev->post_work, adf_post_work_func);
+
+       ret = adf_device_sysfs_init(dev);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+
+err:
+       adf_device_destroy(dev);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+       struct adf_attachment_list *entry, *next;
+
+       idr_destroy(&dev->interfaces);
+       idr_destroy(&dev->overlay_engines);
+
+       if (dev->post_thread) {
+               flush_kthread_worker(&dev->post_worker);
+               kthread_stop(dev->post_thread);
+       }
+
+       if (dev->onscreen)
+               adf_post_cleanup(dev, dev->onscreen);
+       adf_device_sysfs_destroy(dev);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               adf_attachment_free(entry);
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               adf_attachment_free(entry);
+       }
+       mutex_destroy(&dev->post_lock);
+       mutex_destroy(&dev->client_lock);
+       adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ *     e.g. interface DSI.1 -> @type=%ADF_INTF_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+               enum adf_interface_type type, u32 idx, u32 flags,
+               const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+       const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+                       ADF_INTF_FLAG_EXTERNAL;
+
+       if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+               pr_err("%s: parent device %s has too many interfaces\n",
+                               __func__, dev->base.name);
+               return -ENOMEM;
+       }
+
+       if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+               pr_err("%s: invalid interface type %u\n", __func__, type);
+               return -EINVAL;
+       }
+
+       if (flags & ~allowed_flags) {
+               pr_err("%s: invalid interface flags 0x%X\n", __func__,
+                               flags & ~allowed_flags);
+               return -EINVAL;
+       }
+
+       memset(intf, 0, sizeof(*intf));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+                       dev, ops ? &ops->base : NULL, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       intf->type = type;
+       intf->idx = idx;
+       intf->flags = flags;
+       intf->ops = ops;
+       intf->dpms_state = DRM_MODE_DPMS_OFF;
+       init_waitqueue_head(&intf->vsync_wait);
+       rwlock_init(&intf->vsync_lock);
+       rwlock_init(&intf->hotplug_modelist_lock);
+
+       ret = adf_interface_sysfs_init(intf);
+       if (ret < 0)
+               goto err;
+       dev->n_interfaces++;
+
+       return 0;
+
+err:
+       adf_obj_destroy(&intf->base, &dev->interfaces);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       struct adf_attachment_list *entry, *next;
+
+       mutex_lock(&dev->client_lock);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               if (entry->attachment.interface == intf) {
+                       adf_attachment_free(entry);
+                       dev->n_attach_allowed--;
+               }
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               if (entry->attachment.interface == intf) {
+                       adf_device_detach_op(dev,
+                                       entry->attachment.overlay_engine, intf);
+                       adf_attachment_free(entry);
+                       dev->n_attached--;
+               }
+       }
+       kfree(intf->modelist);
+       adf_interface_sysfs_destroy(intf);
+       adf_obj_destroy(&intf->base, &dev->interfaces);
+       dev->n_interfaces--;
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+               const struct adf_overlay_engine_ops *ops)
+{
+       size_t i;
+       for (i = 0; i < ops->n_supported_formats; i++)
+               if (!adf_format_is_standard(ops->supported_formats[i]))
+                       return true;
+       return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+               struct adf_device *dev,
+               const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+       int ret;
+       va_list args;
+
+       if (!ops->supported_formats) {
+               pr_err("%s: overlay engine must support at least one format\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+               pr_err("%s: overlay engine supports too many formats\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       if (adf_overlay_engine_has_custom_formats(ops) &&
+                       !dev->ops->validate_custom_format) {
+               pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+                               __func__, dev->base.name);
+               return -EINVAL;
+       }
+
+       memset(eng, 0, sizeof(*eng));
+
+       va_start(args, fmt);
+       ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+                       &dev->overlay_engines, dev, &ops->base, fmt, args);
+       va_end(args);
+       if (ret < 0)
+               return ret;
+
+       eng->ops = ops;
+
+       ret = adf_overlay_engine_sysfs_init(eng);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+
+err:
+       adf_obj_destroy(&eng->base, &dev->overlay_engines);
+       return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an overlay engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+       struct adf_device *dev = adf_overlay_engine_parent(eng);
+       struct adf_attachment_list *entry, *next;
+
+       mutex_lock(&dev->client_lock);
+       list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+               if (entry->attachment.overlay_engine == eng) {
+                       adf_attachment_free(entry);
+                       dev->n_attach_allowed--;
+               }
+       }
+       list_for_each_entry_safe(entry, next, &dev->attached, head) {
+               if (entry->attachment.overlay_engine == eng) {
+                       adf_device_detach_op(dev, eng,
+                                       entry->attachment.interface);
+                       adf_attachment_free(entry);
+                       dev->n_attached--;
+               }
+       }
+       adf_overlay_engine_sysfs_destroy(eng);
+       adf_obj_destroy(&eng->base, &dev->overlay_engines);
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_attachment_list *entry;
+       list_for_each_entry(entry, list, head) {
+               if (entry->attachment.interface == intf &&
+                               entry->attachment.overlay_engine == eng)
+                       return entry;
+       }
+       return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_device *intf_dev = adf_interface_parent(intf);
+       struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+       if (intf_dev != dev) {
+               dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+                               intf->base.name, intf_dev->base.name);
+               return -EINVAL;
+       }
+
+       if (eng_dev != dev) {
+               dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+                               eng->base.name, eng_dev->base.name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output.  It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *entry = NULL;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+               ret = -EALREADY;
+               goto done;
+       }
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       entry->attachment.interface = intf;
+       entry->attachment.overlay_engine = eng;
+       list_add_tail(&entry->head, &dev->attach_allowed);
+       dev->n_attach_allowed++;
+
+done:
+       mutex_unlock(&dev->client_lock);
+       if (ret < 0)
+               kfree(entry);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_attachment_allow);
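
Putting the init entry points together, a hedged sketch of a minimal driver probe path. The mydrv/mydev names and ops tables are hypothetical; the call sequence and error unwinding follow the functions above:

static struct {
	struct adf_device base;
	struct adf_interface intf;
	struct adf_overlay_engine eng;
} mydev;

static int mydrv_probe(struct platform_device *pdev)
{
	int err;

	err = adf_device_init(&mydev.base, &pdev->dev, &mydrv_dev_ops,
			"mydrv");
	if (err < 0)
		return err;

	err = adf_interface_init(&mydev.intf, &mydev.base, ADF_INTF_DSI, 0,
			ADF_INTF_FLAG_PRIMARY, &mydrv_intf_ops, "dsi.%u", 0);
	if (err < 0)
		goto err_dev;

	err = adf_overlay_engine_init(&mydev.eng, &mydev.base,
			&mydrv_eng_ops, "primary-eng");
	if (err < 0)
		goto err_intf;

	/* the hardware can scan this engine out on this interface */
	err = adf_attachment_allow(&mydev.base, &mydev.eng, &mydev.intf);
	if (err < 0 && err != -EALREADY)
		goto err_eng;

	return 0;

err_eng:
	adf_overlay_engine_destroy(&mydev.eng);
err_intf:
	adf_interface_destroy(&mydev.intf);
err_dev:
	adf_device_destroy(&mydev.base);
	return err;
}
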
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+       switch (type) {
+       case ADF_OBJ_OVERLAY_ENGINE:
+               return "overlay engine";
+
+       case ADF_OBJ_INTERFACE:
+               return "interface";
+
+       case ADF_OBJ_DEVICE:
+               return "device";
+
+       default:
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+       switch (intf->type) {
+       case ADF_INTF_DSI:
+               return "DSI";
+
+       case ADF_INTF_eDP:
+               return "eDP";
+
+       case ADF_INTF_DPI:
+               return "DPI";
+
+       case ADF_INTF_VGA:
+               return "VGA";
+
+       case ADF_INTF_DVI:
+               return "DVI";
+
+       case ADF_INTF_HDMI:
+               return "HDMI";
+
+       case ADF_INTF_MEMORY:
+               return "memory";
+
+       default:
+               if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+                       if (intf->ops && intf->ops->type_str)
+                               return intf->ops->type_str(intf);
+                       return "custom";
+               }
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+       switch (type) {
+       case ADF_EVENT_VSYNC:
+               return "vsync";
+
+       case ADF_EVENT_HOTPLUG:
+               return "hotplug";
+
+       default:
+               if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+                       if (obj->ops && obj->ops->event_type_str)
+                               return obj->ops->event_type_str(obj, type);
+                       return "custom";
+               }
+               return "unknown";
+       }
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+       buf[0] = format & 0xFF;
+       buf[1] = (format >> 8) & 0xFF;
+       buf[2] = (format >> 16) & 0xFF;
+       buf[3] = (format >> 24) & 0xFF;
+       buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+               u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+       u8 i;
+
+       if (num_planes != buf->n_planes) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(buf->format, format_str);
+               dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+                               num_planes, format_str, buf->n_planes);
+               return -EINVAL;
+       }
+
+       if (buf->w == 0 || buf->w % hsub) {
+               dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+               return -EINVAL;
+       }
+
+       if (buf->h == 0 || buf->h % vsub) {
+               dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_planes; i++) {
+               u32 width = buf->w / (i != 0 ? hsub : 1);
+               u32 height = buf->h / (i != 0 ? vsub : 1);
+               u8 cpp = adf_format_plane_cpp(buf->format, i);
+               u32 last_line_size;
+
+               if (buf->pitch[i] < (u64) width * cpp) {
+                       dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+                                       i, buf->pitch[i], width, cpp * 8);
+                       return -EINVAL;
+               }
+
+               switch (dev->ops->quirks.buffer_padding) {
+               case ADF_BUFFER_PADDED_TO_PITCH:
+                       last_line_size = buf->pitch[i];
+                       break;
+
+               case ADF_BUFFER_UNPADDED:
+                       last_line_size = width * cpp;
+                       break;
+
+               default:
+                       BUG();
+               }
+
+               if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
+                               buf->offset[i] > buf->dma_bufs[i]->size) {
+                       dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+                                       i, height, buf->pitch[i],
+                                       buf->offset[i], buf->dma_bufs[i]->size);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
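
A sketch of how a driver might use this helper for an NV12-style two-plane format (2x2 chroma subsampling, 1 byte per Y sample, 2 bytes per interleaved CbCr sample); the validate_custom_format() op signature is assumed, not taken from this file:

static int mydrv_validate_custom_format(struct adf_device *dev,
		struct adf_buffer *buf)
{
	u8 cpp[] = { 1, 2 };	/* Y plane, interleaved CbCr plane */

	return adf_format_validate_yuv(dev, buf, 2, 2, 2, cpp);
}
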
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)".  It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+       bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+                mode->hdisplay, mode->vdisplay,
+                interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from @mode->clock,
+ * @mode->{h,v}total, @mode->flags, and @mode->vscan.  It is intended to help
+ * drivers create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               return;
+
+       if (mode->htotal <= 0 || mode->vtotal <= 0)
+               return;
+
+       /* vrefresh = clock in Hz / (htotal * vtotal), rounded to nearest */
+       calc_val = (mode->clock * 1000);
+       calc_val /= mode->htotal;
+       refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               refresh *= 2;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               refresh /= 2;
+       if (mode->vscan > 1)
+               refresh /= mode->vscan;
+
+       mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
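
Worked example for the standard 1080p60 timing (clock is in kHz, per DRM convention):

/*
 * clock = 148500, htotal = 2200, vtotal = 1125, no interlace/doublescan:
 *   calc_val = 148500 * 1000 / 2200      = 67500   (line rate, Hz)
 *   refresh  = (67500 + 1125 / 2) / 1125 = 60      (rounded to nearest)
 * so mode->vrefresh becomes 60.
 */
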
+
+static int __init adf_init(void)
+{
+       int err;
+
+       err = adf_sysfs_init();
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit adf_exit(void)
+{
+       adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644 (file)
index 0000000..3bcf1fa
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+       struct rb_node node;
+       enum adf_event_type type;
+       int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+               struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+               enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+               enum adf_event_type type)
+{
+       if (!obj->ops || !obj->ops->supports_event)
+               return -EOPNOTSUPP;
+       if (!obj->ops->supports_event(obj, type))
+               return -EINVAL;
+       return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       if (!dev->ops->attach)
+               return 0;
+
+       return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       if (!dev->ops->detach)
+               return 0;
+
+       return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644 (file)
index 0000000..8061d8e
--- /dev/null
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+       return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       u8 prev_state;
+       bool disable_vsync;
+       bool enable_vsync;
+       int ret = 0;
+       struct adf_event_refcount *vsync_refcount;
+
+       if (!intf->ops || !intf->ops->blank)
+               return -EOPNOTSUPP;
+
+       if (state > DRM_MODE_DPMS_OFF)
+               return -EINVAL;
+
+       mutex_lock(&dev->client_lock);
+       if (state != DRM_MODE_DPMS_ON)
+               flush_kthread_worker(&dev->post_worker);
+       mutex_lock(&intf->base.event_lock);
+
+       vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+                       ADF_EVENT_VSYNC);
+       if (!vsync_refcount) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       prev_state = intf->dpms_state;
+       if (prev_state == state) {
+               ret = -EBUSY;
+               goto done;
+       }
+
+       disable_vsync = vsync_active(prev_state) &&
+                       !vsync_active(state) &&
+                       vsync_refcount->refcount;
+       enable_vsync = !vsync_active(prev_state) &&
+                       vsync_active(state) &&
+                       vsync_refcount->refcount;
+
+       if (disable_vsync)
+               intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                               false);
+
+       ret = intf->ops->blank(intf, state);
+       if (ret < 0) {
+               if (disable_vsync)
+                       intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                                       true);
+               goto done;
+       }
+
+       if (enable_vsync)
+               intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+                               true);
+
+       intf->dpms_state = state;
+done:
+       mutex_unlock(&intf->base.event_lock);
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       u8 dpms_state;
+
+       mutex_lock(&dev->client_lock);
+       dpms_state = intf->dpms_state;
+       mutex_unlock(&dev->client_lock);
+
+       return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+
+       mutex_lock(&dev->client_lock);
+       memcpy(mode, &intf->current_mode, sizeof(*mode));
+       mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+       unsigned long flags;
+       size_t retval;
+
+       read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       if (modelist)
+               memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+                               min(n_modes, intf->n_modes));
+       retval = intf->n_modes;
+       read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
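+
+/*
+ * Example: the usual two-call pattern for reading the modelist (a sketch;
+ * "intf" is a hypothetical interface pointer).  The list may change on a
+ * hotplug between the calls, so clamp to the entries actually copied:
+ *
+ *	n = adf_interface_modelist(intf, NULL, 0);
+ *	modes = kcalloc(n, sizeof(*modes), GFP_KERNEL);
+ *	if (modes)
+ *		n = min(n, adf_interface_modelist(intf, modes, n));
+ */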
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       int ret = 0;
+
+       if (!intf->ops || !intf->ops->modeset)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&dev->client_lock);
+       flush_kthread_worker(&dev->post_worker);
+
+       ret = intf->ops->modeset(intf, mode);
+       if (ret < 0)
+               goto done;
+
+       memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
+
+/**
+ * adf_interface_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+               u16 *height_mm)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       int ret;
+
+       if (!intf->ops || !intf->ops->screen_size)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&dev->client_lock);
+       ret = intf->ops->screen_size(intf, width_mm, height_mm);
+       mutex_unlock(&dev->client_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+               u32 format)
+{
+       size_t i;
+       for (i = 0; i < eng->ops->n_supported_formats; i++)
+               if (format == eng->ops->supported_formats[i])
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+       struct adf_overlay_engine *eng = buf->overlay_engine;
+       struct device *dev = &eng->base.dev;
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+       u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+       if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(buf->format, format_str);
+               dev_err(dev, "unsupported format %s\n", format_str);
+               return -EINVAL;
+       }
+
+       if (!adf_format_is_standard(buf->format))
+               return parent->ops->validate_custom_format(parent, buf);
+
+       hsub = adf_format_horz_chroma_subsampling(buf->format);
+       vsub = adf_format_vert_chroma_subsampling(buf->format);
+       num_planes = adf_format_num_planes(buf->format);
+       for (i = 0; i < num_planes; i++)
+               cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+       return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+                       cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+               struct adf_buffer_mapping *mapping)
+{
+       int ret = 0;
+       size_t i;
+
+       for (i = 0; i < buf->n_planes; i++) {
+               struct dma_buf_attachment *attachment;
+               struct sg_table *sg_table;
+
+               attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+               if (IS_ERR(attachment)) {
+                       ret = PTR_ERR(attachment);
+                       dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
+                                       i, ret);
+                       goto done;
+               }
+               mapping->attachments[i] = attachment;
+
+               sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+               if (IS_ERR(sg_table)) {
+                       ret = PTR_ERR(sg_table);
+                       dev_err(&dev->base.dev, "mapping plane %zu failed: %d",
+                                       i, ret);
+                       goto done;
+               } else if (!sg_table) {
+                       ret = -ENOMEM;
+                       dev_err(&dev->base.dev, "mapping plane %zu failed\n",
+                                       i);
+                       goto done;
+               }
+               mapping->sg_tables[i] = sg_table;
+       }
+
+done:
+       if (ret < 0)
+               adf_buffer_mapping_cleanup(mapping, buf);
+
+       return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+       struct sync_pt *pt;
+       struct sync_fence *complete_fence;
+
+       if (!dev->timeline) {
+               dev->timeline = sw_sync_timeline_create(dev->base.name);
+               if (!dev->timeline)
+                       return ERR_PTR(-ENOMEM);
+               dev->timeline_max = 1;
+       }
+
+       dev->timeline_max++;
+       pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+       if (!pt)
+               goto err_pt_create;
+       complete_fence = sync_fence_create(dev->base.name, pt);
+       if (!complete_fence)
+               goto err_fence_create;
+
+       return complete_fence;
+
+err_fence_create:
+       sync_pt_free(pt);
+err_pt_create:
+       dev->timeline_max--;
+       return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack.  adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size)
+{
+       struct adf_interface **intfs_copy = NULL;
+       struct adf_buffer *bufs_copy = NULL;
+       void *custom_data_copy = NULL;
+       struct sync_fence *ret;
+       size_t i;
+
+       intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+       if (!intfs_copy)
+               return ERR_PTR(-ENOMEM);
+
+       bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+       if (!bufs_copy) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+       if (!custom_data_copy) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       for (i = 0; i < n_bufs; i++) {
+               size_t j;
+               for (j = 0; j < bufs[i].n_planes; j++)
+                       get_dma_buf(bufs[i].dma_bufs[j]);
+       }
+
+       memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+       memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+       memcpy(custom_data_copy, custom_data, custom_data_size);
+
+       ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+                       n_bufs, custom_data_copy, custom_data_size);
+       if (IS_ERR(ret))
+               goto err_post;
+
+       return ret;
+
+err_post:
+       for (i = 0; i < n_bufs; i++) {
+               size_t j;
+               for (j = 0; j < bufs[i].n_planes; j++)
+                       dma_buf_put(bufs[i].dma_bufs[j]);
+       }
+err_alloc:
+       kfree(custom_data_copy);
+       kfree(bufs_copy);
+       kfree(intfs_copy);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
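+
+/*
+ * Example: a minimal one-buffer flip with the copying variant (a sketch;
+ * "intf" and "buf" are assumed to be set up by the caller, and no
+ * driver-private data is passed):
+ *
+ *	struct adf_interface *intfs[] = { intf };
+ *	struct sync_fence *fence;
+ *
+ *	fence = adf_device_post(dev, intfs, 1, &buf, 1, NULL, 0);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	sync_fence_put(fence);
+ */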
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs,
+               void *custom_data, size_t custom_data_size)
+{
+       struct adf_pending_post *cfg;
+       struct adf_buffer_mapping *mappings;
+       struct sync_fence *ret;
+       size_t i;
+       int err;
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg)
+               return ERR_PTR(-ENOMEM);
+
+       mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+       if (!mappings) {
+               ret = ERR_PTR(-ENOMEM);
+               goto err_alloc;
+       }
+
+       mutex_lock(&dev->client_lock);
+
+       for (i = 0; i < n_bufs; i++) {
+               err = adf_buffer_validate(&bufs[i]);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto err_buf;
+               }
+
+               err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto err_buf;
+               }
+       }
+
+       INIT_LIST_HEAD(&cfg->head);
+       cfg->config.n_bufs = n_bufs;
+       cfg->config.bufs = bufs;
+       cfg->config.mappings = mappings;
+       cfg->config.custom_data = custom_data;
+       cfg->config.custom_data_size = custom_data_size;
+
+       err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+       if (err < 0) {
+               ret = ERR_PTR(err);
+               goto err_buf;
+       }
+
+       mutex_lock(&dev->post_lock);
+
+       if (dev->ops->complete_fence)
+               ret = dev->ops->complete_fence(dev, &cfg->config,
+                               cfg->state);
+       else
+               ret = adf_sw_complete_fence(dev);
+
+       if (IS_ERR(ret))
+               goto err_fence;
+
+       list_add_tail(&cfg->head, &dev->post_list);
+       queue_kthread_work(&dev->post_worker, &dev->post_work);
+       mutex_unlock(&dev->post_lock);
+       mutex_unlock(&dev->client_lock);
+       kfree(intfs);
+       return ret;
+
+err_fence:
+       mutex_unlock(&dev->post_lock);
+
+err_buf:
+       for (i = 0; i < n_bufs; i++)
+               adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+       mutex_unlock(&dev->client_lock);
+       kfree(mappings);
+
+err_alloc:
+       kfree(cfg);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
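+
+/*
+ * Example: error handling with the nocopy variant (a sketch).  On failure
+ * the caller still owns the kmalloc()ed arrays and the dma-buf references,
+ * mirroring adf_device_post()'s own error path above:
+ *
+ *	fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs, n_bufs,
+ *			data, data_size);
+ *	if (IS_ERR(fence)) {
+ *		for (i = 0; i < n_bufs; i++)
+ *			for (j = 0; j < bufs[i].n_planes; j++)
+ *				dma_buf_put(bufs[i].dma_bufs[j]);
+ *		kfree(data);
+ *		kfree(bufs);
+ *		kfree(intfs);
+ *	}
+ */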
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+               struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+       struct adf_attachment_list *entry;
+       size_t i = 0;
+
+       if (!dst)
+               return;
+
+       list_for_each_entry(entry, src, head) {
+               if (i == size)
+                       return;
+               dst[i] = entry->attachment;
+               i++;
+       }
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments)
+{
+       size_t retval;
+
+       mutex_lock(&dev->client_lock);
+       adf_attachment_list_to_array(dev, &dev->attached, attachments,
+                       n_attachments);
+       retval = dev->n_attached;
+       mutex_unlock(&dev->client_lock);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments)
+{
+       size_t retval;
+
+       mutex_lock(&dev->client_lock);
+       adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+                       n_attachments);
+       retval = dev->n_attach_allowed;
+       mutex_unlock(&dev->client_lock);
+
+       return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       struct adf_attachment_list *attachment;
+
+       mutex_lock(&dev->client_lock);
+       attachment = adf_attachment_find(&dev->attached, eng, intf);
+       mutex_unlock(&dev->client_lock);
+
+       return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+       struct adf_attachment_list *attachment;
+
+       mutex_lock(&dev->client_lock);
+       attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+       mutex_unlock(&dev->client_lock);
+
+       return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *attachment = NULL;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       if (adf_attachment_find(&dev->attached, eng, intf)) {
+               ret = -EALREADY;
+               goto done;
+       }
+
+       ret = adf_device_attach_op(dev, eng, intf);
+       if (ret < 0)
+               goto done;
+
+       attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+       if (!attachment) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       attachment->attachment.interface = intf;
+       attachment->attachment.overlay_engine = eng;
+       list_add_tail(&attachment->head, &dev->attached);
+       dev->n_attached++;
+
+done:
+       mutex_unlock(&dev->client_lock);
+       if (ret < 0)
+               kfree(attachment);
+
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
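+
+/*
+ * Example: attaching at client setup time while tolerating an existing
+ * attachment, the same pattern adf_fbdev_open() uses in adf_fbdev.c
+ * ("dev", "eng" and "intf" are assumed to belong together):
+ *
+ *	err = adf_device_attach(dev, eng, intf);
+ *	if (err < 0 && err != -EALREADY)
+ *		return err;
+ */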
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf)
+{
+       int ret;
+       struct adf_attachment_list *attachment;
+
+       ret = adf_attachment_validate(dev, eng, intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->client_lock);
+
+       attachment = adf_attachment_find(&dev->attached, eng, intf);
+       if (!attachment) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       ret = adf_device_detach_op(dev, eng, intf);
+       if (ret < 0)
+               goto done;
+
+       adf_attachment_free(attachment);
+       dev->n_attached--;
+done:
+       mutex_unlock(&dev->client_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+               u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+       if (!intf->ops || !intf->ops->alloc_simple_buffer)
+               return -EOPNOTSUPP;
+
+       if (!adf_format_is_rgb(format))
+               return -EINVAL;
+
+       return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+                       offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
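+
+/*
+ * Example: a sketch of allocating and CPU-mapping a simple RGB buffer,
+ * mirroring what adf_fb_alloc() does in adf_fbdev.c (the resolution and
+ * format here are arbitrary):
+ *
+ *	err = adf_interface_simple_buffer_alloc(intf, 1280, 720,
+ *			DRM_FORMAT_XRGB8888, &dma_buf, &offset, &pitch);
+ *	if (err < 0)
+ *		return err;
+ *	vaddr = dma_buf_vmap(dma_buf);
+ */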
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+               struct adf_buffer *buf)
+{
+       size_t custom_data_size = 0;
+       void *custom_data = NULL;
+       struct sync_fence *ret;
+
+       if (intf->ops && intf->ops->describe_simple_post) {
+               int err;
+
+               custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+               if (!custom_data) {
+                       ret = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+
+               err = intf->ops->describe_simple_post(intf, buf, custom_data,
+                               &custom_data_size);
+               if (err < 0) {
+                       ret = ERR_PTR(err);
+                       goto done;
+               }
+       }
+
+       ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+                       custom_data, custom_data_size);
+done:
+       kfree(custom_data);
+       return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
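+
+/*
+ * Example: a minimal sketch of flipping one already-described buffer;
+ * adf_fbdev_post() in adf_fbdev.c is a complete in-tree user of this call:
+ *
+ *	fence = adf_interface_simple_post(intf, &buf);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	sync_fence_put(fence);
+ */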
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644 (file)
index 0000000..9d3c245
--- /dev/null
@@ -0,0 +1,665 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+       u32 fourcc;
+       u32 bpp;
+       u32 r_length;
+       u32 g_length;
+       u32 b_length;
+       u32 a_length;
+       u32 r_offset;
+       u32 g_offset;
+       u32 b_offset;
+       u32 a_offset;
+};
+
+static const struct adf_fbdev_format format_table[] = {
+       {DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+       {DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 6, 0},
+
+       {DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+       {DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+       {DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+       {DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 4, 8, 12, 0},
+
+       {DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+       {DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+       {DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+       {DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 4, 8, 12, 0},
+
+       {DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+       {DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+       {DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+       {DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+       {DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+       {DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+       {DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+       {DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+       {DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+       {DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+       {DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+       {DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+       {DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+       {DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+       {DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+       {DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+       {DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+       {DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+       {DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+       {DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+       {DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+       {DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+       {DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+       {DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+       {DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+       {DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+       {DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+       {DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+               const struct adf_fbdev_format *f = &format_table[i];
+               if (var->red.length == f->r_length &&
+                       var->red.offset == f->r_offset &&
+                       var->green.length == f->g_length &&
+                       var->green.offset == f->g_offset &&
+                       var->blue.length == f->b_length &&
+                       var->blue.offset == f->b_offset &&
+                       var->transp.length == f->a_length &&
+                       (var->transp.length == 0 ||
+                                       var->transp.offset == f->a_offset))
+                       return f->fourcc;
+       }
+
+       return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+       size_t i;
+       for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+               const struct adf_fbdev_format *f = &format_table[i];
+               if (f->fourcc == format)
+                       return f;
+       }
+
+       BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode)
+{
+       memset(vmode, 0, sizeof(*vmode));
+
+       vmode->refresh = mode->vrefresh;
+
+       vmode->xres = mode->hdisplay;
+       vmode->yres = mode->vdisplay;
+
+       vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+       vmode->left_margin = mode->htotal - mode->hsync_end;
+       vmode->right_margin = mode->hsync_start - mode->hdisplay;
+       vmode->upper_margin = mode->vtotal - mode->vsync_end;
+       vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+       vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+       vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+       vmode->sync = 0;
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+               vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+       if (mode->flags & DRM_MODE_FLAG_BCAST)
+               vmode->sync |= FB_SYNC_BROADCAST;
+
+       vmode->vmode = 0;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               vmode->vmode |= FB_VMODE_INTERLACED;
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode)
+{
+       memset(mode, 0, sizeof(*mode));
+
+       mode->hdisplay = vmode->xres;
+       mode->hsync_start = mode->hdisplay + vmode->right_margin;
+       mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+       mode->htotal = mode->hsync_end + vmode->left_margin;
+
+       mode->vdisplay = vmode->yres;
+       mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+       mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+       mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+       mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+       mode->flags = 0;
+       if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PHSYNC;
+       if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PVSYNC;
+       if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+               mode->flags |= DRM_MODE_FLAG_PCSYNC;
+       if (vmode->sync & FB_SYNC_BROADCAST)
+               mode->flags |= DRM_MODE_FLAG_BCAST;
+       if (vmode->vmode & FB_VMODE_INTERLACED)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+       if (vmode->vmode & FB_VMODE_DOUBLE)
+               mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+       if (vmode->refresh)
+               mode->vrefresh = vmode->refresh;
+       else
+               adf_modeinfo_set_vrefresh(mode);
+
+       if (vmode->name)
+               strlcpy(mode->name, vmode->name, sizeof(mode->name));
+       else
+               adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
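+
+/*
+ * Example: the two helpers above are inverses for the fields they share,
+ * so a mode can be bounced through the fbdev representation (a sketch;
+ * this is essentially what adf_fbdev_set_par() below does with info->var):
+ *
+ *	adf_modeinfo_to_fb_videomode(&mode, &vmode);
+ *	fb_videomode_to_var(&var, &vmode);
+ *	...
+ *	fb_var_to_videomode(&vmode, &var);
+ *	adf_modeinfo_from_fb_videomode(&vmode, &mode);
+ */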
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+       struct adf_buffer buf;
+       struct sync_fence *complete_fence;
+       int ret = 0;
+
+       memset(&buf, 0, sizeof(buf));
+       buf.overlay_engine = fbdev->eng;
+       buf.w = fbdev->info->var.xres;
+       buf.h = fbdev->info->var.yres;
+       buf.format = fbdev->format;
+       buf.dma_bufs[0] = fbdev->dma_buf;
+       buf.offset[0] = fbdev->offset +
+                       fbdev->info->var.yoffset * fbdev->pitch +
+                       fbdev->info->var.xoffset *
+                       (fbdev->info->var.bits_per_pixel / 8);
+       buf.pitch[0] = fbdev->pitch;
+       buf.n_planes = 1;
+
+       complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto done;
+       }
+
+       sync_fence_put(complete_fence);
+done:
+       return ret;
+}
+
+static const u16 vga_palette[][3] = {
+       {0x0000, 0x0000, 0x0000},
+       {0x0000, 0x0000, 0xAAAA},
+       {0x0000, 0xAAAA, 0x0000},
+       {0x0000, 0xAAAA, 0xAAAA},
+       {0xAAAA, 0x0000, 0x0000},
+       {0xAAAA, 0x0000, 0xAAAA},
+       {0xAAAA, 0x5555, 0x0000},
+       {0xAAAA, 0xAAAA, 0xAAAA},
+       {0x5555, 0x5555, 0x5555},
+       {0x5555, 0x5555, 0xFFFF},
+       {0x5555, 0xFFFF, 0x5555},
+       {0x5555, 0xFFFF, 0xFFFF},
+       {0xFFFF, 0x5555, 0x5555},
+       {0xFFFF, 0x5555, 0xFFFF},
+       {0xFFFF, 0xFFFF, 0x5555},
+       {0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+       int ret;
+
+       ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+                       fbdev->default_xres_virtual,
+                       fbdev->default_yres_virtual,
+                       fbdev->default_format,
+                       &fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+       if (ret < 0) {
+               dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+               return ret;
+       }
+
+       fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+       if (!fbdev->vaddr) {
+               ret = -ENOMEM;
+               dev_err(fbdev->info->dev, "vmapping fb failed\n");
+               goto err_vmap;
+       }
+       fbdev->info->fix.line_length = fbdev->pitch;
+       fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+       fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+       fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+       fbdev->info->screen_base = fbdev->vaddr;
+
+       return 0;
+
+err_vmap:
+       dma_buf_put(fbdev->dma_buf);
+       return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+       dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+       dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+       size_t i;
+       const struct adf_fbdev_format *info = fbdev_format_info(format);
+       for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+               u16 r = vga_palette[i][0];
+               u16 g = vga_palette[i][1];
+               u16 b = vga_palette[i][2];
+
+               r >>= (16 - info->r_length);
+               g >>= (16 - info->g_length);
+               b >>= (16 - info->b_length);
+
+               fbdev->pseudo_palette[i] =
+                       (r << info->r_offset) |
+                       (g << info->g_offset) |
+                       (b << info->b_offset);
+
+               if (info->a_length) {
+                       u16 a = BIT(info->a_length) - 1;
+                       fbdev->pseudo_palette[i] |= (a << info->a_offset);
+               }
+       }
+
+       fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+       fbdev->info->var.red.length = info->r_length;
+       fbdev->info->var.red.offset = info->r_offset;
+       fbdev->info->var.green.length = info->g_length;
+       fbdev->info->var.green.offset = info->g_offset;
+       fbdev->info->var.blue.length = info->b_length;
+       fbdev->info->var.blue.offset = info->b_offset;
+       fbdev->info->var.transp.length = info->a_length;
+       fbdev->info->var.transp.offset = info->a_offset;
+       fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+       struct drm_mode_modeinfo *modelist;
+       struct fb_videomode fbmode;
+       size_t n_modes, i;
+       int ret = 0;
+
+       n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+       modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+       if (!modelist) {
+               dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+               return;
+       }
+       adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+       fb_destroy_modelist(&fbdev->info->modelist);
+
+       for (i = 0; i < n_modes; i++) {
+               adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+               ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+               if (ret < 0)
+                       dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+                                       modelist[i].name, ret);
+       }
+
+       kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+       struct adf_fbdev *fbdev = info->par;
+       int ret;
+
+       mutex_lock(&fbdev->refcount_lock);
+
+       if (unlikely(fbdev->refcount == UINT_MAX)) {
+               ret = -EMFILE;
+               goto done;
+       }
+
+       if (!fbdev->refcount) {
+               struct drm_mode_modeinfo mode;
+               struct fb_videomode fbmode;
+               struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+               ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+               if (ret < 0 && ret != -EALREADY)
+                       goto done;
+
+               ret = adf_fb_alloc(fbdev);
+               if (ret < 0)
+                       goto done;
+
+               adf_interface_current_mode(fbdev->intf, &mode);
+               adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+               fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+               adf_fbdev_set_format(fbdev, fbdev->default_format);
+               adf_fbdev_fill_modelist(fbdev);
+       }
+
+       ret = adf_fbdev_post(fbdev);
+       if (ret < 0) {
+               if (!fbdev->refcount)
+                       adf_fb_destroy(fbdev);
+               goto done;
+       }
+
+       fbdev->refcount++;
+done:
+       mutex_unlock(&fbdev->refcount_lock);
+       return ret;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+       struct adf_fbdev *fbdev = info->par;
+       mutex_lock(&fbdev->refcount_lock);
+       BUG_ON(!fbdev->refcount);
+       fbdev->refcount--;
+       if (!fbdev->refcount)
+               adf_fb_destroy(fbdev);
+       mutex_unlock(&fbdev->refcount_lock);
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       bool valid_format = true;
+       u32 format = drm_fourcc_from_fb_var(var);
+       u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+       if (!format) {
+               dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && var->grayscale) {
+               dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+                               __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && var->nonstd) {
+               dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+                               __func__);
+               valid_format = false;
+       }
+
+       if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+                       format)) {
+               char format_str[ADF_FORMAT_STR_SIZE];
+               adf_format_str(format, format_str);
+               dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+                               __func__, format_str, fbdev->eng->base.name);
+               valid_format = false;
+       }
+
+       if (valid_format && pitch > fbdev->pitch) {
+               dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+                               __func__, fbdev->pitch, var->xres_virtual,
+                               var->bits_per_pixel);
+               valid_format = false;
+       }
+
+       if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+               dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+                               __func__, fbdev->default_yres_virtual,
+                               var->yres_virtual);
+               valid_format = false;
+       }
+
+       if (valid_format) {
+               var->activate = info->var.activate;
+               var->height = info->var.height;
+               var->width = info->var.width;
+               var->accel_flags = info->var.accel_flags;
+               var->rotate = info->var.rotate;
+               var->colorspace = info->var.colorspace;
+               /* userspace can't change these */
+       } else {
+               /*
+                * If any part of the format is invalid then fixing it up is
+                * impractical, so save just the modesetting bits and
+                * overwrite everything else.
+                */
+               struct fb_videomode mode;
+               fb_var_to_videomode(&mode, var);
+               memcpy(var, &info->var, sizeof(*var));
+               fb_videomode_to_var(var, &mode);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       struct adf_interface *intf = fbdev->intf;
+       struct fb_videomode vmode;
+       struct drm_mode_modeinfo mode;
+       int ret;
+       u32 format = drm_fourcc_from_fb_var(&info->var);
+
+       fb_var_to_videomode(&vmode, &info->var);
+       adf_modeinfo_from_fb_videomode(&vmode, &mode);
+       ret = adf_interface_set_mode(intf, &mode);
+       if (ret < 0)
+               return ret;
+
+       ret = adf_fbdev_post(fbdev);
+       if (ret < 0)
+               return ret;
+
+       if (format != fbdev->format)
+               adf_fbdev_set_format(fbdev, format);
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       struct adf_interface *intf = fbdev->intf;
+       u8 dpms_state;
+
+       switch (blank) {
+       case FB_BLANK_UNBLANK:
+               dpms_state = DRM_MODE_DPMS_ON;
+               break;
+       case FB_BLANK_NORMAL:
+               dpms_state = DRM_MODE_DPMS_STANDBY;
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               dpms_state = DRM_MODE_DPMS_SUSPEND;
+               break;
+       case FB_BLANK_HSYNC_SUSPEND:
+               dpms_state = DRM_MODE_DPMS_STANDBY;
+               break;
+       case FB_BLANK_POWERDOWN:
+               dpms_state = DRM_MODE_DPMS_OFF;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       struct adf_fbdev *fbdev = info->par;
+       return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct adf_fbdev *fbdev = info->par;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...)
+{
+       struct adf_device *parent = adf_interface_parent(interface);
+       struct device *dev = &parent->base.dev;
+       u16 width_mm, height_mm;
+       va_list args;
+       int ret;
+
+       if (!adf_format_is_rgb(format) ||
+                       format == DRM_FORMAT_C8) {
+               dev_err(dev, "fbdev helper does not support format %u\n",
+                               format);
+               return -EINVAL;
+       }
+
+       memset(fbdev, 0, sizeof(*fbdev));
+       fbdev->intf = interface;
+       fbdev->eng = eng;
+       fbdev->info = framebuffer_alloc(0, dev);
+       if (!fbdev->info) {
+               dev_err(dev, "allocating framebuffer device failed\n");
+               return -ENOMEM;
+       }
+       mutex_init(&fbdev->refcount_lock);
+       fbdev->default_xres_virtual = xres_virtual;
+       fbdev->default_yres_virtual = yres_virtual;
+       fbdev->default_format = format;
+
+       fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+       ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+       if (ret < 0) {
+               width_mm = 0;
+               height_mm = 0;
+       }
+       fbdev->info->var.width = width_mm;
+       fbdev->info->var.height = height_mm;
+       fbdev->info->var.activate = FB_ACTIVATE_VBL;
+       va_start(args, fmt);
+       vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+       va_end(args);
+       fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+       fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+       fbdev->info->fix.xpanstep = 1;
+       fbdev->info->fix.ypanstep = 1;
+       INIT_LIST_HEAD(&fbdev->info->modelist);
+       fbdev->info->fbops = fbops;
+       fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+       fbdev->info->par = fbdev;
+
+       ret = register_framebuffer(fbdev->info);
+       if (ret < 0) {
+               dev_err(dev, "registering framebuffer failed: %d\n", ret);
+               framebuffer_release(fbdev->info);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
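+
+/*
+ * Example: a sketch of a driver wiring the default helpers into its
+ * fb_ops and registering the emulation.  All names here are hypothetical,
+ * and the sys_* drawing helpers assume a CPU-mappable framebuffer:
+ *
+ *	static struct fb_ops my_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		.fb_open = adf_fbdev_open,
+ *		.fb_release = adf_fbdev_release,
+ *		.fb_check_var = adf_fbdev_check_var,
+ *		.fb_set_par = adf_fbdev_set_par,
+ *		.fb_blank = adf_fbdev_blank,
+ *		.fb_pan_display = adf_fbdev_pan_display,
+ *		.fb_fillrect = sys_fillrect,
+ *		.fb_copyarea = sys_copyarea,
+ *		.fb_imageblit = sys_imageblit,
+ *		.fb_mmap = adf_fbdev_mmap,
+ *	};
+ *
+ *	err = adf_fbdev_init(&my_fbdev, intf, eng, 1080, 1920,
+ *			DRM_FORMAT_RGBA8888, &my_fb_ops, "myfb");
+ */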
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+       unregister_framebuffer(fbdev->info);
+       BUG_ON(fbdev->refcount);
+       mutex_destroy(&fbdev->refcount_lock);
+       framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644 (file)
index 0000000..7fbf33e
--- /dev/null
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+               struct adf_set_event __user *arg)
+{
+       struct adf_set_event data;
+       bool enabled;
+       unsigned long flags;
+       int err;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       err = adf_obj_check_supports_event(obj, data.type);
+       if (err < 0)
+               return err;
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       if (data.enabled)
+               enabled = test_and_set_bit(data.type,
+                               file->event_subscriptions);
+       else
+               enabled = test_and_clear_bit(data.type,
+                               file->event_subscriptions);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+       if (data.enabled == enabled)
+               return -EALREADY;
+
+       if (data.enabled)
+               adf_event_get(obj, data.type);
+       else
+               adf_event_put(obj, data.type);
+
+       return 0;
+}
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+               void __user *dst, size_t *dst_size)
+{
+       void *custom_data;
+       size_t custom_data_size;
+       int ret;
+
+       if (!obj->ops || !obj->ops->custom_data) {
+               dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+               *dst_size = 0;
+               return 0;
+       }
+
+       custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+       if (!custom_data)
+               return -ENOMEM;
+
+       ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+       if (ret < 0)
+               goto done;
+
+       if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+               ret = -EFAULT;
+               goto done;
+       }
+       *dst_size = custom_data_size;
+
+done:
+       kfree(custom_data);
+       return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+               struct adf_overlay_engine_data __user *arg)
+{
+       struct adf_device *dev = adf_overlay_engine_parent(eng);
+       struct adf_overlay_engine_data data;
+       size_t n_supported_formats;
+       u32 *supported_formats = NULL;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+       if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+               return -EINVAL;
+
+       n_supported_formats = data.n_supported_formats;
+       data.n_supported_formats = eng->ops->n_supported_formats;
+
+       if (n_supported_formats) {
+               supported_formats = kzalloc(n_supported_formats *
+                               sizeof(supported_formats[0]), GFP_KERNEL);
+               if (!supported_formats)
+                       return -ENOMEM;
+       }
+
+       memcpy(supported_formats, eng->ops->supported_formats,
+                       sizeof(u32) * min(n_supported_formats,
+                                       eng->ops->n_supported_formats));
+
+       mutex_lock(&dev->client_lock);
+       ret = adf_obj_copy_custom_data_to_user(&eng->base, data.custom_data,
+                       &data.custom_data_size);
+       mutex_unlock(&dev->client_lock);
+
+       if (ret < 0)
+               goto done;
+
+       if (copy_to_user(arg, &data, sizeof(data))) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       if (supported_formats && copy_to_user(data.supported_formats,
+                       supported_formats,
+                       n_supported_formats * sizeof(supported_formats[0])))
+               ret = -EFAULT;
+
+done:
+       kfree(supported_formats);
+       return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+               struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+       struct adf_buffer_config user_buf;
+       size_t i;
+       int ret = 0;
+
+       if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+               return -EFAULT;
+
+       memset(buf, 0, sizeof(*buf));
+
+       if (user_buf.n_planes > ADF_MAX_PLANES) {
+               dev_err(&dev->base.dev, "invalid plane count %u\n",
+                               user_buf.n_planes);
+               return -EINVAL;
+       }
+
+       buf->overlay_engine = idr_find(&dev->overlay_engines,
+                       user_buf.overlay_engine);
+       if (!buf->overlay_engine) {
+               dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+                               user_buf.overlay_engine);
+               return -ENOENT;
+       }
+
+       buf->w = user_buf.w;
+       buf->h = user_buf.h;
+       buf->format = user_buf.format;
+       for (i = 0; i < user_buf.n_planes; i++) {
+               buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+               if (IS_ERR(buf->dma_bufs[i])) {
+                       ret = PTR_ERR(buf->dma_bufs[i]);
+                       dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
+                                       user_buf.fd[i], ret);
+                       buf->dma_bufs[i] = NULL;
+                       goto done;
+               }
+               buf->offset[i] = user_buf.offset[i];
+               buf->pitch[i] = user_buf.pitch[i];
+       }
+       buf->n_planes = user_buf.n_planes;
+
+       if (user_buf.acquire_fence >= 0) {
+               buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+               if (!buf->acquire_fence) {
+                       dev_err(&dev->base.dev, "getting fence fd %d failed\n",
+                                       user_buf.acquire_fence);
+                       ret = -EINVAL;
+                       goto done;
+               }
+       }
+
+done:
+       if (ret < 0)
+               adf_buffer_cleanup(buf);
+       return ret;
+}
+
+static int adf_device_post_config(struct adf_device *dev,
+               struct adf_post_config __user *arg)
+{
+       struct sync_fence *complete_fence;
+       int complete_fence_fd;
+       struct adf_buffer *bufs = NULL;
+       struct adf_interface **intfs = NULL;
+       size_t n_intfs, n_bufs, i;
+       void *custom_data = NULL;
+       size_t custom_data_size;
+       int ret = 0;
+
+       complete_fence_fd = get_unused_fd();
+       if (complete_fence_fd < 0)
+               return complete_fence_fd;
+
+       if (get_user(n_intfs, &arg->n_interfaces)) {
+               ret = -EFAULT;
+               goto err_get_user;
+       }
+
+       if (n_intfs > ADF_MAX_INTERFACES) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (get_user(n_bufs, &arg->n_bufs)) {
+               ret = -EFAULT;
+               goto err_get_user;
+       }
+
+       if (n_bufs > ADF_MAX_BUFFERS) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (get_user(custom_data_size, &arg->custom_data_size)) {
+               ret = -EFAULT;
+               goto err_get_user;
+       }
+
+       if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+               ret = -EINVAL;
+               goto err_get_user;
+       }
+
+       if (n_intfs) {
+               intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
+               if (!intfs) {
+                       ret = -ENOMEM;
+                       goto err_get_user;
+               }
+       }
+
+       for (i = 0; i < n_intfs; i++) {
+               u32 intf_id;
+               if (get_user(intf_id, &arg->interfaces[i])) {
+                       ret = -EFAULT;
+                       goto err_get_user;
+               }
+
+               intfs[i] = idr_find(&dev->interfaces, intf_id);
+               if (!intfs[i]) {
+                       ret = -EINVAL;
+                       goto err_get_user;
+               }
+       }
+
+       if (n_bufs) {
+               bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
+               if (!bufs) {
+                       ret = -ENOMEM;
+                       goto err_get_user;
+               }
+       }
+
+       for (i = 0; i < n_bufs; i++) {
+               ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
+               if (ret < 0) {
+                       memset(&bufs[i], 0, sizeof(bufs[i]));
+                       goto err_import;
+               }
+       }
+
+       if (custom_data_size) {
+               void __user *custom_data_usr;
+
+               custom_data = kzalloc(custom_data_size, GFP_KERNEL);
+               if (!custom_data) {
+                       ret = -ENOMEM;
+                       goto err_import;
+               }
+
+               if (get_user(custom_data_usr, &arg->custom_data) ||
+                               copy_from_user(custom_data, custom_data_usr,
+                                       custom_data_size)) {
+                       ret = -EFAULT;
+                       goto err_import;
+               }
+       }
+
+       if (put_user(complete_fence_fd, &arg->complete_fence)) {
+               ret = -EFAULT;
+               goto err_import;
+       }
+
+       complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
+                       n_bufs, custom_data, custom_data_size);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto err_import;
+       }
+
+       sync_fence_install(complete_fence, complete_fence_fd);
+       return 0;
+
+err_import:
+       for (i = 0; i < n_bufs; i++)
+               adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+       kfree(custom_data);
+       kfree(bufs);
+       kfree(intfs);
+       put_unused_fd(complete_fence_fd);
+       return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+               struct adf_simple_post_config __user *arg)
+{
+       struct adf_device *dev = intf->base.parent;
+       struct sync_fence *complete_fence;
+       int complete_fence_fd;
+       struct adf_buffer buf;
+       int ret = 0;
+
+       complete_fence_fd = get_unused_fd();
+       if (complete_fence_fd < 0)
+               return complete_fence_fd;
+
+       ret = adf_buffer_import(dev, &arg->buf, &buf);
+       if (ret < 0)
+               goto err_import;
+
+       if (put_user(complete_fence_fd, &arg->complete_fence)) {
+               ret = -EFAULT;
+               goto err_put_user;
+       }
+
+       complete_fence = adf_interface_simple_post(intf, &buf);
+       if (IS_ERR(complete_fence)) {
+               ret = PTR_ERR(complete_fence);
+               goto err_put_user;
+       }
+
+       sync_fence_install(complete_fence, complete_fence_fd);
+       return 0;
+
+err_put_user:
+       adf_buffer_cleanup(&buf);
+err_import:
+       put_unused_fd(complete_fence_fd);
+       return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+               struct adf_simple_buffer_alloc __user *arg)
+{
+       struct adf_simple_buffer_alloc data;
+       struct dma_buf *dma_buf;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
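+       /*
+        * Reserve the dma-buf fd up front; fd_install() only runs once the
+        * allocation and the copy back to userspace have both succeeded.
+        */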
+       data.fd = get_unused_fd_flags(O_CLOEXEC);
+       if (data.fd < 0)
+               return data.fd;
+
+       ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+                       data.format, &dma_buf, &data.offset, &data.pitch);
+       if (ret < 0)
+               goto err_alloc;
+
+       if (copy_to_user(arg, &data, sizeof(data))) {
+               ret = -EFAULT;
+               goto err_copy;
+       }
+
+       fd_install(data.fd, dma_buf->file);
+       return 0;
+
+err_copy:
+       dma_buf_put(dma_buf);
+
+err_alloc:
+       put_unused_fd(data.fd);
+       return ret;
+}
+
+static int adf_copy_attachment_list_to_user(
+               struct adf_attachment_config __user *to, size_t n_to,
+               struct adf_attachment *from, size_t n_from)
+{
+       struct adf_attachment_config *temp;
+       size_t n = min(n_to, n_from);
+       size_t i;
+       int ret = 0;
+
+       if (!n)
+               return 0;
+
+       temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+       if (!temp)
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++) {
+               temp[i].interface = from[i].interface->base.id;
+               temp[i].overlay_engine = from[i].overlay_engine->base.id;
+       }
+
+       if (copy_to_user(to, temp, n * sizeof(to[0])))
+               ret = -EFAULT;
+
+       kfree(temp);
+       return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+               struct adf_device_data __user *arg)
+{
+       struct adf_device_data data;
+       size_t n_attach;
+       struct adf_attachment *attach = NULL;
+       size_t n_allowed_attach;
+       struct adf_attachment *allowed_attach = NULL;
+       int ret = 0;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+                       data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+               return -EINVAL;
+
+       strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+       if (data.n_attachments) {
+               attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+                               GFP_KERNEL);
+               if (!attach)
+                       return -ENOMEM;
+       }
+       n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+       if (data.n_allowed_attachments) {
+               allowed_attach = kzalloc(data.n_allowed_attachments *
+                               sizeof(allowed_attach[0]), GFP_KERNEL);
+               if (!allowed_attach) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+       }
+       n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+                       data.n_allowed_attachments);
+
+       mutex_lock(&dev->client_lock);
+       ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data,
+                       &data.custom_data_size);
+       mutex_unlock(&dev->client_lock);
+
+       if (ret < 0)
+               goto done;
+
+       ret = adf_copy_attachment_list_to_user(arg->attachments,
+                       data.n_attachments, attach, n_attach);
+       if (ret < 0)
+               goto done;
+
+       ret = adf_copy_attachment_list_to_user(arg->allowed_attachments,
+                       data.n_allowed_attachments, allowed_attach,
+                       n_allowed_attach);
+       if (ret < 0)
+               goto done;
+
+       data.n_attachments = n_attach;
+       data.n_allowed_attachments = n_allowed_attach;
+
+       if (copy_to_user(arg, &data, sizeof(data)))
+               ret = -EFAULT;
+
+done:
+       kfree(allowed_attach);
+       kfree(attach);
+       return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+               struct adf_attachment_config __user *arg, bool attach)
+{
+       struct adf_attachment_config data;
+       struct adf_overlay_engine *eng;
+       struct adf_interface *intf;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+       if (!eng) {
+               dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+                               data.overlay_engine);
+               return -EINVAL;
+       }
+
+       intf = idr_find(&dev->interfaces, data.interface);
+       if (!intf) {
+               dev_err(&dev->base.dev, "invalid interface id %u\n",
+                               data.interface);
+               return -EINVAL;
+       }
+
+       if (attach)
+               return adf_device_attach(dev, eng, intf);
+       else
+               return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo __user *arg)
+{
+       struct drm_mode_modeinfo mode;
+
+       if (copy_from_user(&mode, arg, sizeof(mode)))
+               return -EFAULT;
+
+       return adf_interface_set_mode(intf, &mode);
+}
+
+static int adf_intf_get_data(struct adf_interface *intf,
+               struct adf_interface_data __user *arg)
+{
+       struct adf_device *dev = adf_interface_parent(intf);
+       struct adf_interface_data data;
+       struct drm_mode_modeinfo *modelist;
+       size_t modelist_size;
+       int err;
+       int ret = 0;
+       unsigned long flags;
+
+       if (copy_from_user(&data, arg, sizeof(data)))
+               return -EFAULT;
+
+       strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+       data.type = intf->type;
+       data.id = intf->idx;
+       data.flags = intf->flags;
+
+       err = adf_interface_get_screen_size(intf, &data.width_mm,
+                       &data.height_mm);
+       if (err < 0) {
+               data.width_mm = 0;
+               data.height_mm = 0;
+       }
+
+       modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+       if (!modelist)
+               return -ENOMEM;
+
+       mutex_lock(&dev->client_lock);
+       read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+       data.hotplug_detect = intf->hotplug_detect;
+       modelist_size = min(data.n_available_modes, intf->n_modes) *
+                       sizeof(intf->modelist[0]);
+       memcpy(modelist, intf->modelist, modelist_size);
+       data.n_available_modes = intf->n_modes;
+       read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+       if (copy_to_user(arg->available_modes, modelist, modelist_size)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       data.dpms_state = intf->dpms_state;
+       memcpy(&data.current_mode, &intf->current_mode,
+                       sizeof(intf->current_mode));
+
+       ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data,
+                       &data.custom_data_size);
+done:
+       mutex_unlock(&dev->client_lock);
+       kfree(modelist);
+
+       if (ret < 0)
+               return ret;
+
+       if (copy_to_user(arg, &data, sizeof(data)))
+               ret = -EFAULT;
+
+       return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+               unsigned long arg)
+{
+       if (obj->ops && obj->ops->ioctl)
+               return obj->ops->ioctl(obj, cmd, arg);
+       return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+               struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&eng->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+               return adf_eng_get_data(eng,
+                       (struct adf_overlay_engine_data __user *)arg);
+
+       case ADF_BLANK:
+       case ADF_POST_CONFIG:
+       case ADF_SET_MODE:
+       case ADF_GET_DEVICE_DATA:
+       case ADF_GET_INTERFACE_DATA:
+       case ADF_SIMPLE_POST_CONFIG:
+       case ADF_SIMPLE_BUFFER_ALLOC:
+       case ADF_ATTACH:
+       case ADF_DETACH:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+       }
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+               struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&intf->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_BLANK:
+               return adf_interface_blank(intf, arg);
+
+       case ADF_SET_MODE:
+               return adf_intf_set_mode(intf,
+                               (struct drm_mode_modeinfo __user *)arg);
+
+       case ADF_GET_INTERFACE_DATA:
+               return adf_intf_get_data(intf,
+                               (struct adf_interface_data __user *)arg);
+
+       case ADF_SIMPLE_POST_CONFIG:
+               return adf_intf_simple_post_config(intf,
+                               (struct adf_simple_post_config __user *)arg);
+
+       case ADF_SIMPLE_BUFFER_ALLOC:
+               return adf_intf_simple_buffer_alloc(intf,
+                               (struct adf_simple_buffer_alloc __user *)arg);
+
+       case ADF_POST_CONFIG:
+       case ADF_GET_DEVICE_DATA:
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+       case ADF_ATTACH:
+       case ADF_DETACH:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+       }
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+               unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_SET_EVENT:
+               return adf_obj_set_event(&dev->base, file,
+                               (struct adf_set_event __user *)arg);
+
+       case ADF_POST_CONFIG:
+               return adf_device_post_config(dev,
+                               (struct adf_post_config __user *)arg);
+
+       case ADF_GET_DEVICE_DATA:
+               return adf_device_get_data(dev,
+                               (struct adf_device_data __user *)arg);
+
+       case ADF_ATTACH:
+               return adf_device_handle_attachment(dev,
+                               (struct adf_attachment_config __user *)arg,
+                               true);
+
+       case ADF_DETACH:
+               return adf_device_handle_attachment(dev,
+                               (struct adf_attachment_config __user *)arg,
+                               false);
+
+       case ADF_BLANK:
+       case ADF_SET_MODE:
+       case ADF_GET_INTERFACE_DATA:
+       case ADF_GET_OVERLAY_ENGINE_DATA:
+       case ADF_SIMPLE_POST_CONFIG:
+       case ADF_SIMPLE_BUFFER_ALLOC:
+               return -EINVAL;
+
+       default:
+               return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+       }
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+       struct adf_obj *obj;
+       struct adf_file *fpriv = NULL;
+       unsigned long flags;
+       int ret = 0;
+
+       obj = adf_obj_sysfs_find(iminor(inode));
+       if (!obj)
+               return -ENODEV;
+
+       dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+       if (!try_module_get(obj->parent->ops->owner)) {
+               dev_err(&obj->dev, "getting owner module failed\n");
+               return -ENODEV;
+       }
+
+       fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+       if (!fpriv) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       INIT_LIST_HEAD(&fpriv->head);
+       fpriv->obj = obj;
+       init_waitqueue_head(&fpriv->event_wait);
+
+       file->private_data = fpriv;
+
+       if (obj->ops && obj->ops->open) {
+               ret = obj->ops->open(obj, inode, file);
+               if (ret < 0)
+                       goto done;
+       }
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       list_add_tail(&fpriv->head, &obj->file_list);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+       if (ret < 0) {
+               kfree(fpriv);
+               module_put(obj->parent->ops->owner);
+       }
+       return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+       struct adf_file *fpriv = file->private_data;
+       struct adf_obj *obj = fpriv->obj;
+       enum adf_event_type event_type;
+       unsigned long flags;
+
+       if (obj->ops && obj->ops->release)
+               obj->ops->release(obj, inode, file);
+
+       spin_lock_irqsave(&obj->file_lock, flags);
+       list_del(&fpriv->head);
+       spin_unlock_irqrestore(&obj->file_lock, flags);
+
+       for_each_set_bit(event_type, fpriv->event_subscriptions,
+                       ADF_EVENT_TYPE_MAX) {
+               adf_event_put(obj, event_type);
+       }
+
+       kfree(fpriv);
+       module_put(obj->parent->ops->owner);
+
+       dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+       return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct adf_file *fpriv = file->private_data;
+       struct adf_obj *obj = fpriv->obj;
+       long ret = -EINVAL;
+
+       dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+       switch (obj->type) {
+       case ADF_OBJ_OVERLAY_ENGINE:
+               ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+                               fpriv, cmd, arg);
+               break;
+
+       case ADF_OBJ_INTERFACE:
+               ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+                               arg);
+               break;
+
+       case ADF_OBJ_DEVICE:
+               ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+               break;
+       }
+
+       return ret;
+}
+
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+       int head = fpriv->event_head;
+       int tail = fpriv->event_tail;
+       return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
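+/*
+ * Append an event to the client's ring buffer. event_buf is a power of two
+ * in size, so the CIRC_* helpers can mask instead of divide; an event that
+ * does not fit is dropped rather than stalling the event source.
+ */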
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+       int head = fpriv->event_head;
+       int tail = fpriv->event_tail;
+       size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+       size_t space_to_end =
+                       CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+       if (space < event->length) {
+               dev_dbg(&fpriv->obj->dev,
+                               "insufficient buffer space for event %u\n",
+                               event->type);
+               return;
+       }
+
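+       /* Write in up to two chunks to handle wraparound at the buffer end. */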
+       if (space_to_end >= event->length) {
+               memcpy(fpriv->event_buf + head, event, event->length);
+       } else {
+               memcpy(fpriv->event_buf + head, event, space_to_end);
+               memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+                               event->length - space_to_end);
+       }
+
+       smp_wmb();
+       fpriv->event_head = (fpriv->event_head + event->length) &
+                       (sizeof(fpriv->event_buf) - 1);
+       wake_up_interruptible_all(&fpriv->event_wait);
+}
+
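+/*
+ * Drain queued events into a temporary buffer while holding obj->file_lock,
+ * then copy to userspace only after dropping the spinlock, since
+ * copy_to_user() may fault and sleep.
+ */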
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+               char __user *buffer, size_t buffer_size)
+{
+       int head, tail;
+       u8 *event_buf;
+       size_t cnt, cnt_to_end, copy_size = 0;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+                       GFP_KERNEL);
+       if (!event_buf)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+       if (!adf_file_event_available(fpriv))
+               goto out;
+
+       head = fpriv->event_head;
+       tail = fpriv->event_tail;
+
+       cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+       cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+       copy_size = min(buffer_size, cnt);
+
+       if (cnt_to_end >= copy_size) {
+               memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+       } else {
+               memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+               memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+                               copy_size - cnt_to_end);
+       }
+
+       fpriv->event_tail = (fpriv->event_tail + copy_size) &
+                       (sizeof(fpriv->event_buf) - 1);
+
+out:
+       spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+       if (copy_size) {
+               if (copy_to_user(buffer, event_buf, copy_size))
+                       ret = -EFAULT;
+               else
+                       ret = copy_size;
+       }
+       kfree(event_buf);
+       return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset)
+{
+       struct adf_file *fpriv = filp->private_data;
+       int err;
+
+       err = wait_event_interruptible(fpriv->event_wait,
+                       adf_file_event_available(fpriv));
+       if (err < 0)
+               return err;
+
+       return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct adf_file *fpriv = filp->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(filp, &fpriv->event_wait, wait);
+
+       if (adf_file_event_available(fpriv))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+
+const struct file_operations adf_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = adf_file_compat_ioctl,
+#endif
+       .open = adf_file_open,
+       .release = adf_file_release,
+       .llseek = default_llseek,
+       .read = adf_file_read,
+       .poll = adf_file_poll,
+};
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644 (file)
index 0000000..90a3a74
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+       struct list_head head;
+       struct adf_obj *obj;
+
+       DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
+       u8 event_buf[4096];
+       int event_head;
+       int event_tail;
+       wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644 (file)
index 0000000..d299a81
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
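+/*
+ * Each compat handler widens the 32-bit argument into a native struct
+ * allocated with compat_alloc_user_space(), forwards it through
+ * adf_file_ioctl(), and copies the outputs back into the 32-bit layout.
+ */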
+long adf_compat_post_config(struct file *file,
+               struct adf_post_config32 __user *arg)
+{
+       struct adf_post_config32 cfg32;
+       struct adf_post_config __user *cfg;
+       int ret;
+
+       if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+               return -EFAULT;
+
+       cfg = compat_alloc_user_space(sizeof(*cfg));
+       if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+               return -EFAULT;
+
+       if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+                       put_user(compat_ptr(cfg32.interfaces),
+                                       &cfg->interfaces) ||
+                       put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+                       put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+                       put_user(cfg32.custom_data_size,
+                                       &cfg->custom_data_size) ||
+                       put_user(compat_ptr(cfg32.custom_data),
+                                       &cfg->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+                       sizeof(cfg->complete_fence)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+               struct adf_device_data32 __user *arg)
+{
+       struct adf_device_data32 data32;
+       struct adf_device_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_attachments, &data->n_attachments) ||
+                       put_user(compat_ptr(data32.attachments),
+                                       &data->attachments) ||
+                       put_user(data32.n_allowed_attachments,
+                                       &data->n_allowed_attachments) ||
+                       put_user(compat_ptr(data32.allowed_attachments),
+                                       &data->allowed_attachments) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->n_attachments, &data->n_attachments,
+                                       sizeof(arg->n_attachments)) ||
+                       copy_in_user(&arg->n_allowed_attachments,
+                                       &data->n_allowed_attachments,
+                                       sizeof(arg->n_allowed_attachments)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+               struct adf_interface_data32 __user *arg)
+{
+       struct adf_interface_data32 data32;
+       struct adf_interface_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+                       put_user(compat_ptr(data32.available_modes),
+                                       &data->available_modes) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->type, &data->type,
+                                       sizeof(arg->type)) ||
+                       copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+                       copy_in_user(&arg->flags, &data->flags,
+                                       sizeof(arg->flags)) ||
+                       copy_in_user(&arg->dpms_state, &data->dpms_state,
+                                       sizeof(arg->dpms_state)) ||
+                       copy_in_user(&arg->hotplug_detect,
+                                       &data->hotplug_detect,
+                                       sizeof(arg->hotplug_detect)) ||
+                       copy_in_user(&arg->width_mm, &data->width_mm,
+                                       sizeof(arg->width_mm)) ||
+                       copy_in_user(&arg->height_mm, &data->height_mm,
+                                       sizeof(arg->height_mm)) ||
+                       copy_in_user(&arg->current_mode, &data->current_mode,
+                                       sizeof(arg->current_mode)) ||
+                       copy_in_user(&arg->n_available_modes,
+                                       &data->n_available_modes,
+                                       sizeof(arg->n_available_modes)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+               struct adf_overlay_engine_data32 __user *arg)
+{
+       struct adf_overlay_engine_data32 data32;
+       struct adf_overlay_engine_data __user *data;
+       int ret;
+
+       if (copy_from_user(&data32, arg, sizeof(data32)))
+               return -EFAULT;
+
+       data = compat_alloc_user_space(sizeof(*data));
+       if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+               return -EFAULT;
+
+       if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+                       put_user(compat_ptr(data32.supported_formats),
+                                       &data->supported_formats) ||
+                       put_user(data32.custom_data_size,
+                                       &data->custom_data_size) ||
+                       put_user(compat_ptr(data32.custom_data),
+                                       &data->custom_data))
+               return -EFAULT;
+
+       ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+                       (unsigned long)data);
+       if (ret < 0)
+               return ret;
+
+       if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+                       copy_in_user(&arg->n_supported_formats,
+                                       &data->n_supported_formats,
+                                       sizeof(arg->n_supported_formats)) ||
+                       copy_in_user(&arg->custom_data_size,
+                                       &data->custom_data_size,
+                                       sizeof(arg->custom_data_size)))
+               return -EFAULT;
+
+       return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg)
+{
+       switch (cmd) {
+       case ADF_POST_CONFIG32:
+               return adf_compat_post_config(file, compat_ptr(arg));
+
+       case ADF_GET_DEVICE_DATA32:
+               return adf_compat_get_device_data(file, compat_ptr(arg));
+
+       case ADF_GET_INTERFACE_DATA32:
+               return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+       case ADF_GET_OVERLAY_ENGINE_DATA32:
+               return adf_compat_get_overlay_engine_data(file,
+                               compat_ptr(arg));
+
+       default:
+               return adf_file_ioctl(file, cmd, arg);
+       }
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644 (file)
index 0000000..64034ce
--- /dev/null
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+               _IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+               _IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+       compat_size_t n_interfaces;
+       compat_uptr_t interfaces;
+
+       compat_size_t n_bufs;
+       compat_uptr_t bufs;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+
+       __s32 complete_fence;
+};
+
+struct adf_device_data32 {
+       char name[ADF_NAME_LEN];
+
+       compat_size_t n_attachments;
+       compat_uptr_t attachments;
+
+       compat_size_t n_allowed_attachments;
+       compat_uptr_t allowed_attachments;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+       char name[ADF_NAME_LEN];
+
+       __u8 type;
+       __u32 id;
+       /* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+       __u32 flags;
+
+       __u8 dpms_state;
+       __u8 hotplug_detect;
+       __u16 width_mm;
+       __u16 height_mm;
+
+       struct drm_mode_modeinfo current_mode;
+       compat_size_t n_available_modes;
+       compat_uptr_t available_modes;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+       char name[ADF_NAME_LEN];
+
+       compat_size_t n_supported_formats;
+       compat_uptr_t supported_formats;
+
+       compat_size_t custom_data_size;
+       compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644 (file)
index 0000000..e3f22c7
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_XBGR4444:
+       case DRM_FORMAT_RGBX4444:
+       case DRM_FORMAT_BGRX4444:
+       case DRM_FORMAT_ARGB4444:
+       case DRM_FORMAT_ABGR4444:
+       case DRM_FORMAT_RGBA4444:
+       case DRM_FORMAT_BGRA4444:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_AYUV:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return true;
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               return true;
+
+       default:
+               return false;
+       }
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 3;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+               return 8;
+
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               return 16;
+
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               return 24;
+
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               return 32;
+
+       default:
+               pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+               return 0;
+       }
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+       if (plane >= adf_format_num_planes(format))
+               return 0;
+
+       switch (format) {
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               return 2;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+               return plane ? 2 : 1;
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 1;
+       default:
+               return adf_format_bpp(format) / 8;
+       }
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644 (file)
index 0000000..e73a7d5
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+       phys_addr_t base;
+};
+
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+               enum dma_data_direction direction)
+{
+       struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+       unsigned long pfn = PFN_DOWN(pdata->base);
+       struct page *page = pfn_to_page(pfn);
+       struct sg_table *table;
+       int nents, ret;
+
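+       /*
+        * The memblock carveout is physically contiguous, so a single-entry
+        * scatterlist covering the whole buffer suffices.
+        */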
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret < 0)
+               goto err_alloc;
+
+       sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+
+       nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
+       if (!nents) {
+               ret = -EINVAL;
+               goto err_map;
+       }
+
+       return table;
+
+err_map:
+       sg_free_table(table);
+err_alloc:
+       kfree(table);
+       return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+               struct sg_table *table, enum dma_data_direction direction)
+{
+       dma_unmap_sg(attach->dev, table->sgl, 1, direction);
+       sg_free_table(table);
+}
+
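+/*
+ * memblock_free() is annotated __init_memblock; matching that annotation
+ * here avoids a section-mismatch warning on configs that discard the
+ * memblock init sections.
+ */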
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+       int err = memblock_free(pdata->base, buf->size);
+
+       if (err < 0)
+               pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+       kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+               bool atomic)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+       unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+       struct page *page = pfn_to_page(pfn);
+
+       if (atomic)
+               return kmap_atomic(page);
+       else
+               return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+               unsigned long pgoffset)
+{
+       return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+               unsigned long pgoffset, void *vaddr)
+{
+       kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+       return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+               void *vaddr)
+{
+       kunmap(vaddr);
+}
+
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+       struct adf_memblock_pdata *pdata = buf->priv;
+
+       return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+                       vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static const struct dma_buf_ops adf_memblock_ops = {
+       .map_dma_buf = adf_memblock_map,
+       .unmap_dma_buf = adf_memblock_unmap,
+       .release = adf_memblock_release,
+       .kmap_atomic = adf_memblock_kmap_atomic,
+       .kunmap_atomic = adf_memblock_kunmap_atomic,
+       .kmap = adf_memblock_kmap,
+       .kunmap = adf_memblock_kunmap,
+       .mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+       struct adf_memblock_pdata *pdata;
+       struct dma_buf *buf;
+
+       if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+               return ERR_PTR(-EINVAL);
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       pdata->base = base;
+       buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);
+       if (IS_ERR(buf))
+               kfree(pdata);
+
+       return buf;
+}
+EXPORT_SYMBOL(adf_memblock_export);
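+/*
+ * Illustrative use only (not part of this patch): a driver with a boot-time
+ * framebuffer carveout could export it roughly as below; fb_base and fb_size
+ * are hypothetical, platform-specific values.
+ *
+ *      struct dma_buf *fb_buf;
+ *
+ *      fb_buf = adf_memblock_export(fb_base, fb_size, O_RDWR);
+ *      if (IS_ERR(fb_buf))
+ *              return PTR_ERR(fb_buf);
+ */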
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644 (file)
index 0000000..8c659c7
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+       adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
+static ssize_t dpms_state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%u\n",
+                       adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       u8 dpms_state;
+       int err;
+
+       err = kstrtou8(buf, 0, &dpms_state);
+       if (err < 0)
+               return err;
+
+       err = adf_interface_blank(intf, dpms_state);
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       struct drm_mode_modeinfo mode;
+
+       adf_interface_current_mode(intf, &mode);
+
+       if (mode.name[0]) {
+               return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+       } else {
+               bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+               return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+                               mode.vdisplay, interlaced ? "i" : "");
+       }
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%s\n",
+                       adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       ktime_t timestamp;
+       unsigned long flags;
+
+       read_lock_irqsave(&intf->vsync_lock, flags);
+       memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+       read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+       return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct adf_interface *intf = dev_to_adf_interface(dev);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+       __ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+       __ATTR_RO(current_mode),
+       __ATTR_RO(hotplug_detect),
+       __ATTR_RO(type),
+       __ATTR_RO(vsync_timestamp),
+};
+
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+       int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: allocating adf minor failed: %d\n", __func__,
+                               ret);
+               return ret;
+       }
+
+       obj->minor = ret;
+       obj->dev.parent = parent;
+       obj->dev.class = adf_class;
+       obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+       ret = device_register(&obj->dev);
+       if (ret < 0) {
+               pr_err("%s: registering adf object failed: %d\n", __func__,
+                               ret);
+               goto err_device_register;
+       }
+
+       return 0;
+
+err_device_register:
+       idr_remove(&adf_minors, obj->minor);
+       return ret;
+}
+
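+/*
+ * devnode callbacks give each object class its own /dev naming scheme:
+ * adf%d, adf-interface%d.%d, and adf-overlay-engine%d.%d.
+ */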
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       struct adf_interface *intf = adf_obj_to_interface(obj);
+       struct adf_device *parent = adf_interface_parent(intf);
+       return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+                       parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+               kuid_t *uid, kgid_t *gid)
+{
+       struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+       struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+       return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+                       parent->base.id, eng->base.id);
+}
+
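+/*
+ * The embedded struct device lives and dies with its containing adf object,
+ * which ADF frees itself; an empty release() keeps the driver core from
+ * complaining about a missing release() function.
+ */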
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+       .name = "adf_device",
+       .devnode = adf_device_devnode,
+       .release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+       .name = "adf_interface",
+       .devnode = adf_interface_devnode,
+       .release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+       .name = "adf_overlay_engine",
+       .devnode = adf_overlay_engine_devnode,
+       .release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+       dev->base.dev.type = &adf_device_type;
+       dev_set_name(&dev->base.dev, "%s", dev->base.name);
+       return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+       struct adf_device *parent = adf_interface_parent(intf);
+       size_t i, j;
+       int ret;
+
+       intf->base.dev.type = &adf_interface_type;
+       dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+                       intf->base.id);
+
+       ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+               ret = device_create_file(&intf->base.dev,
+                               &adf_interface_attrs[i]);
+               if (ret < 0) {
+                       dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+                                       adf_interface_attrs[i].attr.name, ret);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       for (j = 0; j < i; j++)
+               device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+       return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+       struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+       eng->base.dev.type = &adf_overlay_engine_type;
+       dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+                       eng->base.id);
+
+       return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+       return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+       idr_remove(&adf_minors, obj->minor);
+       device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+       adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+               device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+       adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+       adf_obj_sysfs_destroy(&eng->base);
+}
+
+int adf_sysfs_init(void)
+{
+       struct class *class;
+       int ret;
+
+       class = class_create(THIS_MODULE, "adf");
+       if (IS_ERR(class)) {
+               ret = PTR_ERR(class);
+               pr_err("%s: creating class failed: %d\n", __func__, ret);
+               return ret;
+       }
+
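+       /*
+        * A major of 0 requests dynamic allocation; on success
+        * register_chrdev() returns the major it picked.
+        */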
+       ret = register_chrdev(0, "adf", &adf_fops);
+       if (ret < 0) {
+               pr_err("%s: registering device failed: %d\n", __func__, ret);
+               goto err_chrdev;
+       }
+
+       adf_class = class;
+       adf_major = ret;
+       return 0;
+
+err_chrdev:
+       class_destroy(adf_class);
+       return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+       idr_destroy(&adf_minors);
+       class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644 (file)
index 0000000..0613ac3
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644 (file)
index 0000000..3cb2a84
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
+/* All three ADF event tracepoints share the same layout and output. */
+DECLARE_EVENT_CLASS(adf_event_template,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type),
+
+       TP_STRUCT__entry(
+               __string(name, obj->name)
+               __field(enum adf_event_type, type)
+               __array(char, type_str, 32)
+       ),
+       TP_fast_assign(
+               __assign_str(name, obj->name);
+               __entry->type = type;
+               strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+                               sizeof(__entry->type_str));
+       ),
+       TP_printk("obj=%s type=%u (%s)",
+                       __get_str(name),
+                       __entry->type,
+                       __entry->type_str)
+);
+
+DEFINE_EVENT(adf_event_template, adf_event,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type)
+);
+
+DEFINE_EVENT(adf_event_template, adf_event_enable,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type)
+);
+
+DEFINE_EVENT(adf_event_template, adf_event_disable,
+       TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+       TP_ARGS(obj, type)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>
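
As with any self-including tracepoint header, exactly one compilation unit must instantiate these events by defining CREATE_TRACE_POINTS before the include; a sketch of the expected pattern (the host file name is an assumption):

    /* In one .c file only (e.g. adf.c), before including the header: */
    #define CREATE_TRACE_POINTS
    #include "adf_trace.h"

    /* Call sites then emit events through the generated helpers,
     * e.g. trace_adf_event(obj, type); trace_adf_event_enable(obj, type);
     */
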
index e033491fe3088a1f4496aeecc47734416a739963..ab29939ea4cd52619ef0ba6c4239b0cb09c0fa6d 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/ds2482.h>
 #include <asm/delay.h>
 
 #include "../w1.h"
@@ -84,7 +86,8 @@ static const u8 ds2482_chan_rd[8] =
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id);
 static int ds2482_remove(struct i2c_client *client);
-
+static int ds2482_suspend(struct device *dev);
+static int ds2482_resume(struct device *dev);
 
 /**
  * Driver data (common to all clients)
@@ -94,10 +97,16 @@ static const struct i2c_device_id ds2482_id[] = {
        { }
 };
 
+static const struct dev_pm_ops ds2482_pm_ops = {
+       .suspend = ds2482_suspend,
+       .resume = ds2482_resume,
+};
+
 static struct i2c_driver ds2482_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "ds2482",
+               .pm = &ds2482_pm_ops,
        },
        .probe          = ds2482_probe,
        .remove         = ds2482_remove,
@@ -119,6 +128,7 @@ struct ds2482_w1_chan {
 struct ds2482_data {
        struct i2c_client       *client;
        struct mutex            access_lock;
+       int                     slpz_gpio;
 
        /* 1-wire interface(s) */
        int                     w1_count;       /* 1 or 8 */
@@ -444,11 +454,31 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
        return retval;
 }
 
+static int ds2482_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds2482_data *data = i2c_get_clientdata(client);
+
+       if (data->slpz_gpio >= 0)
+               gpio_set_value(data->slpz_gpio, 0);
+       return 0;
+}
+
+static int ds2482_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ds2482_data *data = i2c_get_clientdata(client);
+
+       if (data->slpz_gpio >= 0)
+               gpio_set_value(data->slpz_gpio, 1);
+       return 0;
+}
 
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct ds2482_data *data;
+       struct ds2482_platform_data *pdata;
        int err = -ENODEV;
        int temp1;
        int idx;
@@ -515,6 +545,16 @@ static int ds2482_probe(struct i2c_client *client,
                }
        }
 
+       pdata = client->dev.platform_data;
+       data->slpz_gpio = pdata ? pdata->slpz_gpio : -1;
+
+       if (data->slpz_gpio >= 0) {
+               err = gpio_request_one(data->slpz_gpio, GPIOF_OUT_INIT_HIGH,
+                                      "ds2482.slpz");
+               if (err < 0)
+                       goto exit_w1_remove;
+       }
+
        return 0;
 
 exit_w1_remove:
@@ -539,6 +579,11 @@ static int ds2482_remove(struct i2c_client *client)
                        w1_remove_master_device(&data->w1_ch[idx].w1_bm);
        }
 
+       if (data->slpz_gpio >= 0) {
+               gpio_set_value(data->slpz_gpio, 0);
+               gpio_free(data->slpz_gpio);
+       }
+
        /* Free the memory */
        kfree(data);
        return 0;
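
The driver only touches the SLPZ line when platform data supplies a valid GPIO; a hedged board-file sketch (bus number, I2C address, and GPIO number are invented, and struct ds2482_platform_data is assumed to expose just slpz_gpio, as the probe code implies):

    #include <linux/i2c.h>
    #include <linux/platform_data/ds2482.h>

    static struct ds2482_platform_data board_ds2482_pdata = {
            .slpz_gpio = 87,                /* hypothetical sleep-control line */
    };

    static struct i2c_board_info board_ds2482 __initdata = {
            I2C_BOARD_INFO("ds2482", 0x18),
            .platform_data = &board_ds2482_pdata,
    };

    /* registered from board init with:
     * i2c_register_board_info(0, &board_ds2482, 1);
     */
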
index bfbf4700d160f8a4d54f7cc0446a35c4de8c747d..b70aa7c913940c3766263b67e564a053883f9f32 100644 (file)
@@ -447,7 +447,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
        int error;
 
-       error = wait_event_freezekillable(server->response_q,
+       error = wait_event_freezekillable_unsafe(server->response_q,
                                    midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;
index deecc7294a672c3fa64cf3884c4ee28f4c7f80a7..db10e00c971a71add3be6b140a40b4f9d68580f1 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1602,7 +1603,8 @@ fetch_events:
                        }
 
                        spin_unlock_irqrestore(&ep->lock, flags);
-                       if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+                       if (!freezable_schedule_hrtimeout_range(to, slack,
+                                                               HRTIMER_MODE_ABS))
                                timed_out = 1;
 
                        spin_lock_irqsave(&ep->lock, flags);
@@ -1817,8 +1819,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                goto error_tgt_fput;
 
        /* Check if EPOLLWAKEUP is allowed */
-       if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
-               epds.events &= ~EPOLLWAKEUP;
+       ep_take_care_of_epollwakeup(&epds);
 
        /*
         * We have to check that the file structure underneath the file descriptor
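
ep_take_care_of_epollwakeup() moves the capability check into a header helper so that native and compat entry points can share it; its body presumably reduces to the open-coded test it replaces, roughly:

    static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
    {
            if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
                    epev->events &= ~EPOLLWAKEUP;
    }
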
index 7a6f02caf286d4fa20990b259dd5e4773eeff96e..4b775e606fc8f3ee44157e8a0b5dec4be7141bb4 100644 (file)
@@ -776,6 +776,13 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
        return ret;
 }
 
+static int fat_ioctl_volume_id(struct inode *dir)
+{
+       struct super_block *sb = dir->i_sb;
+       struct msdos_sb_info *sbi = MSDOS_SB(sb);
+       return sbi->vol_id;
+}
+
 static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
 {
@@ -792,6 +799,8 @@ static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
                short_only = 0;
                both = 1;
                break;
+       case VFAT_IOCTL_GET_VOLUME_ID:
+               return fat_ioctl_volume_id(inode);
        default:
                return fat_generic_ioctl(filp, cmd, arg);
        }
@@ -832,6 +841,8 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
                short_only = 0;
                both = 1;
                break;
+       case VFAT_IOCTL_GET_VOLUME_ID:
+               return fat_ioctl_volume_id(inode);
        default:
                return fat_generic_ioctl(filp, cmd, (unsigned long)arg);
        }
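
Userspace reads the volume serial by issuing the ioctl on any directory of the mount; a minimal sketch, assuming VFAT_IOCTL_GET_VOLUME_ID is exported through linux/msdos_fs.h (error handling abbreviated; an ID with the top bit set would need care since it overlaps the error return range):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/msdos_fs.h>

    int main(void)
    {
            int fd = open("/mnt/sdcard", O_RDONLY | O_DIRECTORY);
            long id;

            if (fd < 0)
                    return 1;
            id = ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID);
            printf("volume id: %08lx\n", id);
            close(fd);
            return 0;
    }
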
index 21664fcf361673ec263ce2a23b59c3d7bdabf735..8180ecef59fc9856bfc6f349e9b03a4b286ca2b1 100644 (file)
@@ -86,6 +86,7 @@ struct msdos_sb_info {
        const void *dir_ops;          /* Opaque; default directory operations */
        int dir_per_block;            /* dir entries per block */
        int dir_per_block_bits;       /* log2(dir_per_block) */
+       unsigned long vol_id;         /* volume ID */
 
        int fatent_shift;
        struct fatent_operations *fatent_ops;
index 5d4513cb1b3c03604792f145125b21fb338a3101..a14dd4c0528a90a0f9dce633078ecd160113d59a 100644 (file)
@@ -1252,6 +1252,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
        struct inode *fsinfo_inode = NULL;
        struct buffer_head *bh;
        struct fat_boot_sector *b;
+       struct fat_boot_bsx *bsx;
        struct msdos_sb_info *sbi;
        u16 logical_sector_size;
        u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
@@ -1398,6 +1399,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                        goto out_fail;
                }
 
+               bsx = (struct fat_boot_bsx *)(bh->b_data + FAT32_BSX_OFFSET);
+
                fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
                if (!IS_FSINFO(fsinfo)) {
                        fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
@@ -1413,8 +1416,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
                }
 
                brelse(fsinfo_bh);
+       } else {
+               bsx = (struct fat_boot_bsx *)(bh->b_data + FAT16_BSX_OFFSET);
        }
 
+       /* interpret the volume ID as a little-endian 32-bit integer */
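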
+       sbi->vol_id = (((u32)bsx->vol_id[0]) | ((u32)bsx->vol_id[1] << 8) |
+               ((u32)bsx->vol_id[2] << 16) | ((u32)bsx->vol_id[3] << 24));
+
        sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
        sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
index 387213ac2608ff6c3375ae411ad45716d4ef15e6..556af9eff33692ef523e6a39b6a5805f392847c7 100644 (file)
@@ -1155,7 +1155,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
        if ((inode->i_state & flags) == flags)
                return;
 
-       if (unlikely(block_dump))
+       if (unlikely(block_dump > 1))
                block_dump___mark_inode_dirty(inode);
 
        spin_lock(&inode->i_lock);
index 23bf1a52a5dac5d4a8084019d7c7ecef1df256d1..8eb5a0992658129ebdea8462dbc302af65665fd2 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/swap.h>
 #include <linux/splice.h>
 #include <linux/aio.h>
+#include <linux/freezer.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -464,7 +465,10 @@ __acquires(fc->lock)
         * Wait it out.
         */
        spin_unlock(&fc->lock);
-       wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+
+       while (req->state != FUSE_REQ_FINISHED)
+               wait_event_freezable(req->waitq,
+                                    req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
 
        if (!req->aborted)
index cd4b9073dd2093a9ebd2267293bc733f033244a4..79872e22e4aeea8417713ecb40a5bd18e13a4310 100644 (file)
@@ -79,7 +79,7 @@ int nfs_wait_bit_killable(void *word)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
-       freezable_schedule();
+       freezable_schedule_unsafe();
        return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
index 43ea96ced28cbc763a60048ab95cefcf737ecf40..ce90eb4775c2f888547bd98911062f8eede4af97 100644 (file)
@@ -33,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
                res = rpc_call_sync(clnt, msg, flags);
                if (res != -EJUKEBOX)
                        break;
-               freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+               freezable_schedule_timeout_killable_unsafe(NFS_JUKEBOX_RETRY_TIME);
                res = -ERESTARTSYS;
        } while (!fatal_signal_pending(current));
        return res;
index 1ae7dd5956c581ddc6bf52089f4d6c94983b09a0..bfeb1d13b08fe557b12fc314b2512298f5ab60e1 100644 (file)
@@ -268,7 +268,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
                *timeout = NFS4_POLL_RETRY_MIN;
        if (*timeout > NFS4_POLL_RETRY_MAX)
                *timeout = NFS4_POLL_RETRY_MAX;
-       freezable_schedule_timeout_killable(*timeout);
+       freezable_schedule_timeout_killable_unsafe(*timeout);
        if (fatal_signal_pending(current))
                res = -ERESTARTSYS;
        *timeout <<= 1;
@@ -4529,7 +4529,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-       freezable_schedule_timeout_killable(timeout);
+       freezable_schedule_timeout_killable_unsafe(timeout);
        timeout <<= 1;
        if (timeout > NFS4_LOCK_MAXTIMEOUT)
                return NFS4_LOCK_MAXTIMEOUT;
index de12b8128b959d5351bc6dcb26263f5ffc020503..040b030ef6c0ac915aa0329dfb015a92f1f76893 100644 (file)
@@ -139,6 +139,12 @@ struct pid_entry {
                NULL, &proc_single_file_operations,     \
                { .proc_show = show } )
 
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE)                     \
+       NOD(NAME, (S_IFREG|(MODE)),                     \
+               &proc_##OTYPE##_inode_operations,       \
+               &proc_##OTYPE##_operations, {})
+
 /*
  * Count the number of hardlinks for the pid_entry table, excluding the .
  * and .. links.
@@ -1000,6 +1006,35 @@ out:
        return err < 0 ? err : count;
 }
 
+static int oom_adjust_permission(struct inode *inode, int mask)
+{
+       uid_t uid;
+       struct task_struct *p;
+
+       p = get_proc_task(inode);
+       if (p) {
+               uid = task_uid(p);
+               put_task_struct(p);
+       }
+
+       /*
+        * System Server (uid == 1000) is granted access to oom_adj of all
+        * android applications (uid > 10000) as well as services (uid >= 1000)
+        */
+       if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
+               if (inode->i_mode >> 6 & mask) {
+                       return 0;
+               }
+       }
+
+       /* Fall back to default. */
+       return generic_permission(inode, mask);
+}
+
+static const struct inode_operations proc_oom_adj_inode_operations = {
+       .permission     = oom_adjust_permission,
+};
+
 static const struct file_operations proc_oom_adj_operations = {
        .read           = oom_adj_read,
        .write          = oom_adj_write,
@@ -2698,7 +2733,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("cgroup",  S_IRUGO, proc_cgroup_operations),
 #endif
        INF("oom_score",  S_IRUGO, proc_oom_score),
-       REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+       ANDROID("oom_adj", S_IRUGO|S_IWUSR, oom_adj),
        REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
        REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
index 65fc60a07c47ba7fb13879f130c4c9c93046c18f..db17f98bc5647ee2f5076e1ec858126d1c94b1f2 100644 (file)
@@ -134,6 +134,56 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+       const char __user *name = vma_get_anon_name(vma);
+       struct mm_struct *mm = vma->vm_mm;
+
+       unsigned long page_start_vaddr;
+       unsigned long page_offset;
+       unsigned long num_pages;
+       unsigned long max_len = NAME_MAX;
+       int i;
+
+       page_start_vaddr = (unsigned long)name & PAGE_MASK;
+       page_offset = (unsigned long)name - page_start_vaddr;
+       num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+       seq_puts(m, "[anon:");
+
+       for (i = 0; i < num_pages; i++) {
+               int len;
+               int write_len;
+               const char *kaddr;
+               long pages_pinned;
+               struct page *page;
+
+               pages_pinned = get_user_pages(current, mm, page_start_vaddr,
+                               1, 0, 0, &page, NULL);
+               if (pages_pinned < 1) {
+                       seq_puts(m, "<fault>]");
+                       return;
+               }
+
+               kaddr = (const char *)kmap(page);
+               len = min(max_len, PAGE_SIZE - page_offset);
+               write_len = strnlen(kaddr + page_offset, len);
+               seq_write(m, kaddr + page_offset, write_len);
+               kunmap(page);
+               put_page(page);
+
+               /* if strnlen hit a null terminator then we're done */
+               if (write_len != len)
+                       break;
+
+               max_len -= len;
+               page_offset = 0;
+               page_start_vaddr += PAGE_SIZE;
+       }
+
+       seq_putc(m, ']');
+}
+
 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 {
        if (vma && vma != priv->tail_vma) {
@@ -335,6 +385,12 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                                pad_len_spaces(m, len);
                                seq_printf(m, "[stack:%d]", tid);
                        }
+                       goto done;
+               }
+
+               if (vma_get_anon_name(vma)) {
+                       pad_len_spaces(m, len);
+                       seq_print_vma_name(m, vma);
                }
        }
 
@@ -634,6 +690,12 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 
        show_smap_vma_flags(m, vma);
 
+       if (vma_get_anon_name(vma)) {
+               seq_puts(m, "Name:           ");
+               seq_print_vma_name(m, vma);
+               seq_putc(m, '\n');
+       }
+
        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task->mm))
                        ? vma->vm_start : 0;
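
The name consumed by seq_print_vma_name() lives in user memory and is set from userspace; in this series that is done with an Android-specific prctl. A hedged sketch (the PR_SET_VMA constants are taken from the companion patches and are an assumption here):

    #include <stddef.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA              0x53564d41
    #define PR_SET_VMA_ANON_NAME    0
    #endif

    /* Label an anonymous mapping: shows up as [anon:myheap] in /proc/pid/maps */
    static int name_mapping(void *addr, size_t len)
    {
            return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                         (unsigned long)addr, len, (unsigned long)"myheap");
    }
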
index 1376e5a8f0d6c6cfa430f87ed31d36cc9af5e007..058f17f0b466aaee343cc76adc654cdd15f8d6ec 100644 (file)
@@ -377,6 +377,12 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
        return 0;
 }
 
+void notrace ramoops_console_write_buf(const char *buf, size_t size)
+{
+       struct ramoops_context *cxt = &oops_cxt;
+       persistent_ram_write(cxt->cprz, buf, size);
+}
+
 static int ramoops_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
index 8c1c96c27062a504bfc151f33d335fcd8f6779b5..6b14dc7df3a46df3293da538abc2d65b3b67f94f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
 #include <linux/sched/rt.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -236,7 +237,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 
        set_current_state(state);
        if (!pwq->triggered)
-               rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+               rc = freezable_schedule_hrtimeout_range(expires, slack,
+                                                       HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);
 
        /*
index 32b644f0369020a1c11e1fc7378bae581e4185c7..0013142c04759b527485f2abde83c084e17a7f8f 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/alarmtimer.h>
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 
 struct timerfd_ctx {
-       struct hrtimer tmr;
+       union {
+               struct hrtimer tmr;
+               struct alarm alarm;
+       } t;
        ktime_t tintv;
        ktime_t moffs;
        wait_queue_head_t wqh;
@@ -41,14 +45,19 @@ struct timerfd_ctx {
 static LIST_HEAD(cancel_list);
 static DEFINE_SPINLOCK(cancel_lock);
 
+static inline bool isalarm(struct timerfd_ctx *ctx)
+{
+       return ctx->clockid == CLOCK_REALTIME_ALARM ||
+               ctx->clockid == CLOCK_BOOTTIME_ALARM;
+}
+
 /*
  * This gets called when the timer event triggers. We set the "expired"
  * flag, but we do not re-arm the timer (in case it's necessary,
  * tintv.tv64 != 0) until the timer is accessed.
  */
-static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
+static void timerfd_triggered(struct timerfd_ctx *ctx)
 {
-       struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr);
        unsigned long flags;
 
        spin_lock_irqsave(&ctx->wqh.lock, flags);
@@ -56,10 +65,25 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
        ctx->ticks++;
        wake_up_locked(&ctx->wqh);
        spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+}
 
+static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
+{
+       struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx,
+                                              t.tmr);
+       timerfd_triggered(ctx);
        return HRTIMER_NORESTART;
 }
 
+static enum alarmtimer_restart timerfd_alarmproc(struct alarm *alarm,
+       ktime_t now)
+{
+       struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx,
+                                              t.alarm);
+       timerfd_triggered(ctx);
+       return ALARMTIMER_NORESTART;
+}
+
 /*
  * Called when the clock was set to cancel the timers in the cancel
  * list. This will wake up processes waiting on these timers. The
@@ -107,8 +131,9 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
-       if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) &&
-           (flags & TFD_TIMER_CANCEL_ON_SET)) {
+       if ((ctx->clockid == CLOCK_REALTIME ||
+            ctx->clockid == CLOCK_REALTIME_ALARM) &&
+           (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
                if (!ctx->might_cancel) {
                        ctx->might_cancel = true;
                        spin_lock(&cancel_lock);
@@ -124,7 +149,11 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
 {
        ktime_t remaining;
 
-       remaining = hrtimer_expires_remaining(&ctx->tmr);
+       if (isalarm(ctx))
+               remaining = alarm_expires_remaining(&ctx->t.alarm);
+       else
+               remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+
        return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
 }
 
@@ -142,11 +171,28 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
        ctx->expired = 0;
        ctx->ticks = 0;
        ctx->tintv = timespec_to_ktime(ktmr->it_interval);
-       hrtimer_init(&ctx->tmr, clockid, htmode);
-       hrtimer_set_expires(&ctx->tmr, texp);
-       ctx->tmr.function = timerfd_tmrproc;
+
+       if (isalarm(ctx)) {
+               alarm_init(&ctx->t.alarm,
+                          ctx->clockid == CLOCK_REALTIME_ALARM ?
+                          ALARM_REALTIME : ALARM_BOOTTIME,
+                          timerfd_alarmproc);
+       } else {
+               hrtimer_init(&ctx->t.tmr, clockid, htmode);
+               hrtimer_set_expires(&ctx->t.tmr, texp);
+               ctx->t.tmr.function = timerfd_tmrproc;
+       }
+
        if (texp.tv64 != 0) {
-               hrtimer_start(&ctx->tmr, texp, htmode);
+               if (isalarm(ctx)) {
+                       if (flags & TFD_TIMER_ABSTIME)
+                               alarm_start(&ctx->t.alarm, texp);
+                       else
+                               alarm_start_relative(&ctx->t.alarm, texp);
+               } else {
+                       hrtimer_start(&ctx->t.tmr, texp, htmode);
+               }
+
                if (timerfd_canceled(ctx))
                        return -ECANCELED;
        }
@@ -158,7 +204,11 @@ static int timerfd_release(struct inode *inode, struct file *file)
        struct timerfd_ctx *ctx = file->private_data;
 
        timerfd_remove_cancel(ctx);
-       hrtimer_cancel(&ctx->tmr);
+
+       if (isalarm(ctx))
+               alarm_cancel(&ctx->t.alarm);
+       else
+               hrtimer_cancel(&ctx->t.tmr);
        kfree_rcu(ctx, rcu);
        return 0;
 }
@@ -215,9 +265,15 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
                         * callback to avoid DoS attacks specifying a very
                         * short timer period.
                         */
-                       ticks += hrtimer_forward_now(&ctx->tmr,
-                                                    ctx->tintv) - 1;
-                       hrtimer_restart(&ctx->tmr);
+                       if (isalarm(ctx)) {
+                               ticks += alarm_forward_now(
+                                       &ctx->t.alarm, ctx->tintv) - 1;
+                               alarm_restart(&ctx->t.alarm);
+                       } else {
+                               ticks += hrtimer_forward_now(&ctx->t.tmr,
+                                                            ctx->tintv) - 1;
+                               hrtimer_restart(&ctx->t.tmr);
+                       }
                }
                ctx->expired = 0;
                ctx->ticks = 0;
@@ -259,7 +315,10 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 
        if ((flags & ~TFD_CREATE_FLAGS) ||
            (clockid != CLOCK_MONOTONIC &&
-            clockid != CLOCK_REALTIME))
+            clockid != CLOCK_REALTIME &&
+            clockid != CLOCK_REALTIME_ALARM &&
+            clockid != CLOCK_BOOTTIME &&
+            clockid != CLOCK_BOOTTIME_ALARM))
                return -EINVAL;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -268,7 +327,15 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 
        init_waitqueue_head(&ctx->wqh);
        ctx->clockid = clockid;
-       hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
+
+       if (isalarm(ctx))
+               alarm_init(&ctx->t.alarm,
+                          ctx->clockid == CLOCK_REALTIME_ALARM ?
+                          ALARM_REALTIME : ALARM_BOOTTIME,
+                          timerfd_alarmproc);
+       else
+               hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS);
+
        ctx->moffs = ktime_get_monotonic_offset();
 
        ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
@@ -305,8 +372,14 @@ static int do_timerfd_settime(int ufd, int flags,
         */
        for (;;) {
                spin_lock_irq(&ctx->wqh.lock);
-               if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
-                       break;
+
+               if (isalarm(ctx)) {
+                       if (alarm_try_to_cancel(&ctx->t.alarm) >= 0)
+                               break;
+               } else {
+                       if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0)
+                               break;
+               }
                spin_unlock_irq(&ctx->wqh.lock);
                cpu_relax();
        }
@@ -317,8 +390,12 @@ static int do_timerfd_settime(int ufd, int flags,
         * We do not update "ticks" and "expired" since the timer will be
         * re-programmed again in the following timerfd_setup() call.
         */
-       if (ctx->expired && ctx->tintv.tv64)
-               hrtimer_forward_now(&ctx->tmr, ctx->tintv);
+       if (ctx->expired && ctx->tintv.tv64) {
+               if (isalarm(ctx))
+                       alarm_forward_now(&ctx->t.alarm, ctx->tintv);
+               else
+                       hrtimer_forward_now(&ctx->t.tmr, ctx->tintv);
+       }
 
        old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
        old->it_interval = ktime_to_timespec(ctx->tintv);
@@ -345,9 +422,18 @@ static int do_timerfd_gettime(int ufd, struct itimerspec *t)
        spin_lock_irq(&ctx->wqh.lock);
        if (ctx->expired && ctx->tintv.tv64) {
                ctx->expired = 0;
-               ctx->ticks +=
-                       hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
-               hrtimer_restart(&ctx->tmr);
+
+               if (isalarm(ctx)) {
+                       ctx->ticks +=
+                               alarm_forward_now(
+                                       &ctx->t.alarm, ctx->tintv) - 1;
+                       alarm_restart(&ctx->t.alarm);
+               } else {
+                       ctx->ticks +=
+                               hrtimer_forward_now(&ctx->t.tmr, ctx->tintv)
+                               - 1;
+                       hrtimer_restart(&ctx->t.tmr);
+               }
        }
        t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
        t->it_interval = ktime_to_timespec(ctx->tintv);
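
With the alarm clock IDs accepted, a read on the fd still blocks as before, but a timer armed on CLOCK_BOOTTIME_ALARM can wake the system from suspend (arming one requires CAP_WAKE_ALARM). A minimal userspace sketch, assuming headers new enough to define CLOCK_BOOTTIME_ALARM:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/timerfd.h>

    int arm_wakeup_timer(int seconds)
    {
            struct itimerspec its = {
                    .it_value.tv_sec = seconds,     /* one-shot, relative */
            };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return -1;
            read(fd, &expirations, sizeof(expirations));    /* blocks until expiry */
            return fd;
    }
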
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644 (file)
index 0000000..a460889
--- /dev/null
@@ -0,0 +1,2 @@
+header-y += if_pppolac.h
+header-y += if_pppopns.h
index 9069694e70eb9eee8b1bf4e229f29f7959ce3b46..a899402a5a0e6325c0b3dce2f03c7dca06846238 100644 (file)
@@ -44,10 +44,14 @@ struct alarm {
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
                enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
 int alarm_start(struct alarm *alarm, ktime_t start);
+int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
 
 u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
+ktime_t alarm_expires_remaining(const struct alarm *alarm);
 
 /* Provide way to access the rtc device being used by alarmtimers */
 struct rtc_device *alarmtimer_get_rtcdev(void);
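
A hedged sketch of how in-kernel code might use the new relative-start helper (the callback body and timeout are invented):

    #include <linux/alarmtimer.h>
    #include <linux/ktime.h>

    static enum alarmtimer_restart my_alarm_cb(struct alarm *alarm, ktime_t now)
    {
            /* runs when the alarm fires, even if it had to wake the system */
            return ALARMTIMER_NORESTART;
    }

    static struct alarm my_alarm;

    static void my_arm(void)
    {
            alarm_init(&my_alarm, ALARM_BOOTTIME, my_alarm_cb);
            alarm_start_relative(&my_alarm, ktime_set(30, 0));  /* 30 s from now */
    }
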
index 32a89cf5ec4589b51734f6e65b91a5e9dd6ac53f..9d1d9caf2611615488fb6a94547358ce288777a9 100644 (file)
@@ -5,6 +5,15 @@
 #define AMBA_MMCI_H
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+       struct sdio_cis cis;
+       struct sdio_cccr cccr;
+       struct sdio_embedded_func *funcs;
+       int num_funcs;
+};
 
 
 /*
@@ -73,6 +82,9 @@ struct mmci_platform_data {
        bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
        void *dma_rx_param;
        void *dma_tx_param;
+       unsigned int status_irq;
+       struct embedded_sdio_data *embedded_sdio;
+       int (*register_status_notify)(void (*callback)(int card_present,
+                                                      void *dev_id),
+                                     void *dev_id);
 };
 
 #endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644 (file)
index 0000000..06264b8
--- /dev/null
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_OBSOLETE_000 3001  /* was NET_BT_ADMIN */
+#define AID_OBSOLETE_001 3002  /* was NET_BT */
+#define AID_INET         3003
+#define AID_NET_RAW      3004
+#define AID_NET_ADMIN    3005
+#define AID_NET_BW_STATS 3006  /* read bandwidth statistics */
+#define AID_NET_BW_ACCT  3007  /* change bandwidth statistics accounting */
+
+#endif
index 8852d370c720373b5d92025fc39d46a5ec59e7b7..32a4f95d0bd75b3c1bffba0a5ba2d878be6faec8 100644 (file)
@@ -578,6 +578,7 @@ struct cgroup_subsys {
        void (*css_offline)(struct cgroup *cgrp);
        void (*css_free)(struct cgroup *cgrp);
 
+       int (*allow_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
index 322e0afc86343360185bd258865ac8464fafeaf0..3bf2bc41e41c7250284c6badefa2115646f38acf 100644 (file)
@@ -233,4 +233,11 @@ void arch_cpu_idle_enter(void);
 void arch_cpu_idle_exit(void);
 void arch_cpu_idle_dead(void);
 
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
 #endif /* _LINUX_CPU_H_ */
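
Drivers consume these hooks through an ordinary notifier block; a minimal sketch, with the handler body as an assumption:

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int my_idle_notify(struct notifier_block *nb, unsigned long val,
                              void *data)
    {
            switch (val) {
            case IDLE_START:
                    /* CPU is entering idle */
                    break;
            case IDLE_END:
                    /* CPU is leaving idle */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    /* from init code: idle_notifier_register(&my_idle_nb); */
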
index 69cb5cc810130fefc2dadef90ed8ac4bc4a652ed..c22e4ebb4f7063aa53dc118cbc70258c75932527 100644 (file)
@@ -350,9 +350,11 @@ const char *cpufreq_get_current_driver(void);
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                     *
  *********************************************************************/
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 
 #ifdef CONFIG_CPU_FREQ
 /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
@@ -406,6 +408,9 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_interactive)
 #endif
 
 
index 21ca773f77bf7fd2543b111371279c4ef749722d..822c1354f3a69dae20e7f6cacaf340b40ea9d7c4 100644 (file)
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
index e70df40d84f6fe83c72f732aa44b454148661b6e..7fd81b8c48971676cd52362f09722dfd2dd28153 100644 (file)
@@ -3,6 +3,7 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
+#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -46,7 +47,11 @@ extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 extern void thaw_kernel_threads(void);
 
-static inline bool try_to_freeze(void)
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning, it means the caller may deadlock.
+ */
+static inline bool try_to_freeze_unsafe(void)
 {
        might_sleep();
        if (likely(!freezing(current)))
@@ -54,6 +59,13 @@ static inline bool try_to_freeze(void)
        return __refrigerator(false);
 }
 
+static inline bool try_to_freeze(void)
+{
+       if (!(current->flags & PF_NOFREEZE))
+               debug_check_no_locks_held();
+       return try_to_freeze_unsafe();
+}
+
 extern bool freeze_task(struct task_struct *p);
 extern bool set_freezable(void);
 
@@ -115,6 +127,14 @@ static inline void freezer_count(void)
        try_to_freeze();
 }
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+       current->flags &= ~PF_FREEZER_SKIP;
+       smp_mb();
+       try_to_freeze_unsafe();
+}
+
 /**
  * freezer_should_skip - whether to skip a task when determining frozen
  *                      state is reached
@@ -139,28 +159,86 @@ static inline bool freezer_should_skip(struct task_struct *p)
 }
 
 /*
- * These macros are intended to be used whenever you want allow a sleeping
+ * These functions are intended to be used whenever you want to allow a sleeping
  * task to be frozen. Note that neither return any clear indication of
  * whether a freeze event happened while in this function.
  */
 
 /* Like schedule(), but should not block the freezer. */
-#define freezable_schedule()                                           \
-({                                                                     \
-       freezer_do_not_count();                                         \
-       schedule();                                                     \
-       freezer_count();                                                \
-})
+static inline void freezable_schedule(void)
+{
+       freezer_do_not_count();
+       schedule();
+       freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+       freezer_do_not_count();
+       schedule();
+       freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout(timeout);
+       freezer_count();
+       return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_interruptible(timeout);
+       freezer_count();
+       return __retval;
+}
 
 /* Like schedule_timeout_killable(), but should not block the freezer. */
-#define freezable_schedule_timeout_killable(timeout)                   \
-({                                                                     \
-       long __retval;                                                  \
-       freezer_do_not_count();                                         \
-       __retval = schedule_timeout_killable(timeout);                  \
-       freezer_count();                                                \
-       __retval;                                                       \
-})
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_killable(timeout);
+       freezer_count();
+       return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+       long __retval;
+       freezer_do_not_count();
+       __retval = schedule_timeout_killable(timeout);
+       freezer_count_unsafe();
+       return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+               unsigned long delta, const enum hrtimer_mode mode)
+{
+       int __retval;
+       freezer_do_not_count();
+       __retval = schedule_hrtimeout_range(expires, delta, mode);
+       freezer_count();
+       return __retval;
+}
 
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
@@ -177,33 +255,45 @@ static inline bool freezer_should_skip(struct task_struct *p)
        __retval;                                                       \
 })
 
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition)                        \
+({                                                                     \
+       int __retval;                                                   \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_killable(wq, (condition));                \
+       freezer_count_unsafe();                                         \
+       __retval;                                                       \
+})
+
 #define wait_event_freezable(wq, condition)                            \
 ({                                                                     \
        int __retval;                                                   \
-       for (;;) {                                                      \
-               __retval = wait_event_interruptible(wq,                 \
-                               (condition) || freezing(current));      \
-               if (__retval || (condition))                            \
-                       break;                                          \
-               try_to_freeze();                                        \
-       }                                                               \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible(wq, (condition));           \
+       freezer_count();                                                \
        __retval;                                                       \
 })
 
 #define wait_event_freezable_timeout(wq, condition, timeout)           \
 ({                                                                     \
        long __retval = timeout;                                        \
-       for (;;) {                                                      \
-               __retval = wait_event_interruptible_timeout(wq,         \
-                               (condition) || freezing(current),       \
-                               __retval);                              \
-               if (__retval <= 0 || (condition))                       \
-                       break;                                          \
-               try_to_freeze();                                        \
-       }                                                               \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible_timeout(wq, (condition),    \
+                               __retval);                              \
+       freezer_count();                                                \
        __retval;                                                       \
 })
 
+#define wait_event_freezable_exclusive(wq, condition)                  \
+({                                                                     \
+       int __retval;                                                   \
+       freezer_do_not_count();                                         \
+       __retval = wait_event_interruptible_exclusive(wq, condition);   \
+       freezer_count();                                                \
+       __retval;                                                       \
+})
+
 #else /* !CONFIG_FREEZER */
 static inline bool frozen(struct task_struct *p) { return false; }
 static inline bool freezing(struct task_struct *p) { return false; }
@@ -225,18 +315,37 @@ static inline void set_freezable(void) {}
 
 #define freezable_schedule()  schedule()
 
+#define freezable_schedule_unsafe()  schedule()
+
+#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout)              \
+       schedule_timeout_interruptible(timeout)
+
 #define freezable_schedule_timeout_killable(timeout)                   \
        schedule_timeout_killable(timeout)
 
+#define freezable_schedule_timeout_killable_unsafe(timeout)            \
+       schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode)       \
+       schedule_hrtimeout_range(expires, delta, mode)
+
 #define wait_event_freezable(wq, condition)                            \
                wait_event_interruptible(wq, condition)
 
 #define wait_event_freezable_timeout(wq, condition, timeout)           \
                wait_event_interruptible_timeout(wq, condition, timeout)
 
+#define wait_event_freezable_exclusive(wq, condition)                  \
+               wait_event_interruptible_exclusive(wq, condition)
+
 #define wait_event_freezekillable(wq, condition)               \
                wait_event_killable(wq, condition)
 
+#define wait_event_freezekillable_unsafe(wq, condition)                        \
+               wait_event_killable(wq, condition)
+
 #endif /* !CONFIG_FREEZER */
 
 #endif /* FREEZER_H_INCLUDED */
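
The net effect of the rework: wait_event_freezable() and friends now park the task via freezer_do_not_count()/freezer_count() instead of looping over freezing(current). A sketch of the intended call pattern in a freezable kthread (names invented, synchronization simplified):

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static bool my_work_pending;

    static int my_thread(void *unused)
    {
            set_freezable();
            while (!kthread_should_stop()) {
                    wait_event_freezable(my_wq, my_work_pending ||
                                                kthread_should_stop());
                    /* handle work; the freezer may have frozen us above */
                    my_work_pending = false;
            }
            return 0;
    }
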
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644 (file)
index 0000000..2613fc5
--- /dev/null
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+       int count;
+       struct input_dev *dev[];
+};
+enum {
+       GPIO_EVENT_FUNC_UNINIT  = 0x0,
+       GPIO_EVENT_FUNC_INIT    = 0x1,
+       GPIO_EVENT_FUNC_SUSPEND = 0x2,
+       GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+       int (*func)(struct gpio_event_input_devs *input_devs,
+                   struct gpio_event_info *info,
+                   void **data, int func);
+       int (*event)(struct gpio_event_input_devs *input_devs,
+                    struct gpio_event_info *info,
+                    void **data, unsigned int dev, unsigned int type,
+                    unsigned int code, int value); /* out events */
+       bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+       const char *name;
+       struct gpio_event_info **info;
+       size_t info_count;
+       int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+       const char *names[]; /* If name is NULL, names contains a NULL- */
+                            /* terminated list of input devices to create */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+       /* unset: drive active output low, set: drive active output high */
+       GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+       GPIOKPF_DEBOUNCE                 = 1U << 1,
+       GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+       GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+                                          GPIOKPF_DEBOUNCE,
+       GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+       GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+       GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+       GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+       GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+       (((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+       /* initialize to gpio_event_matrix_func */
+       struct gpio_event_info info;
+       /* size must be ninputs * noutputs */
+       const unsigned short *keymap;
+       unsigned int *input_gpios;
+       unsigned int *output_gpios;
+       unsigned int ninputs;
+       unsigned int noutputs;
+       /* time to wait before reading inputs after driving each output */
+       ktime_t settle_time;
+       /* time to wait before scanning the keypad a second time */
+       ktime_t debounce_delay;
+       ktime_t poll_time;
+       unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+       GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*     GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*     GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+       GPIOEDF_PRINT_KEYS          = 1U << 8,
+       GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+       GPIOEDF_PRINT_KEY_UNSTABLE  = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+       uint32_t gpio:16;
+       uint32_t code:10;
+       uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+       /* initialize to gpio_event_input_func */
+       struct gpio_event_info info;
+       ktime_t debounce_time;
+       ktime_t poll_time;
+       uint16_t flags;
+       uint16_t type;
+       const struct gpio_event_direct_entry *keymap;
+       size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data,
+                       unsigned int dev, unsigned int type,
+                       unsigned int code, int value);
+struct gpio_event_output_info {
+       /* initialize to gpio_event_output_func and gpio_event_output_event */
+       struct gpio_event_info info;
+       uint16_t flags;
+       uint16_t type;
+       const struct gpio_event_direct_entry *keymap;
+       size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+       GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+       GPIOEAF_PRINT_RAW                = 1U << 17,
+       GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+                       struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+       /* initialize to gpio_event_axis_func */
+       struct gpio_event_info info;
+       uint8_t  count; /* number of gpios for this axis */
+       uint8_t  dev; /* device index when using multiple input devices */
+       uint8_t  type; /* EV_REL or EV_ABS */
+       uint16_t code;
+       uint16_t decoded_size;
+       uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+       uint32_t *gpio;
+       uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+                       struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+                       struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
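
A hedged board-file sketch for a 2x2 matrix keypad using this interface (GPIO numbers, keycodes, and timings are invented, and the keymap index order is an assumption; the header only requires size == ninputs * noutputs):

    #include <linux/gpio_event.h>
    #include <linux/input.h>
    #include <linux/kernel.h>
    #include <linux/ktime.h>

    static unsigned int kp_row_gpios[] = { 31, 32 };        /* outputs */
    static unsigned int kp_col_gpios[] = { 36, 37 };        /* inputs */

    static const unsigned short kp_keymap[] = {
            [0 * 2 + 0] = KEY_VOLUMEUP,
            [0 * 2 + 1] = KEY_VOLUMEDOWN,
            [1 * 2 + 0] = KEY_HOME,
            [1 * 2 + 1] = KEY_BACK,
    };

    static struct gpio_event_matrix_info kp_matrix_info = {
            .info.func      = gpio_event_matrix_func,
            .keymap         = kp_keymap,
            .output_gpios   = kp_row_gpios,
            .input_gpios    = kp_col_gpios,
            .noutputs       = ARRAY_SIZE(kp_row_gpios),
            .ninputs        = ARRAY_SIZE(kp_col_gpios),
            .settle_time.tv64 = 40 * NSEC_PER_USEC,
            .poll_time.tv64 = 20 * NSEC_PER_MSEC,
            .flags          = GPIOKPF_LEVEL_TRIGGERED_IRQ |
                              GPIOKPF_REMOVE_PHANTOM_KEYS,
    };

    static struct gpio_event_info *kp_info[] = { &kp_matrix_info.info };

    static struct gpio_event_platform_data kp_pdata = {
            .name           = "board-keypad",
            .info           = kp_info,
            .info_count     = ARRAY_SIZE(kp_info),
    };
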
index 4f8aa4733fb65ab6be3558de3f32d921b66ec95f..40e9c9d76d1eb9efd96c32a110a5b3edb39e0450 100644 (file)
@@ -649,8 +649,8 @@ struct hid_driver {
        int (*input_mapped)(struct hid_device *hdev,
                        struct hid_input *hidinput, struct hid_field *field,
                        struct hid_usage *usage, unsigned long **bit, int *max);
-       void (*input_configured)(struct hid_device *hdev,
-                                struct hid_input *hidinput);
+       int (*input_configured)(struct hid_device *hdev,
+                               struct hid_input *hidinput);
        void (*feature_mapping)(struct hid_device *hdev,
                        struct hid_field *field,
                        struct hid_usage *usage);
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644 (file)
index 0000000..e40aa10
--- /dev/null
@@ -0,0 +1,23 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <uapi/linux/if_pppolac.h>
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644 (file)
index 0000000..4ac621a
--- /dev/null
@@ -0,0 +1,23 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <uapi/linux/if_pppopns.h>
+
+#endif /* __LINUX_IF_PPPOPNS_H */
index aff7ad8a4ea3cdea45daca2049a7b83dc8151cc3..dccd621d6377e8ecf0df54d4ccef87f8e400c086 100644 (file)
@@ -41,6 +41,25 @@ struct pptp_opt {
        u32 seq_sent, seq_recv;
        int ppp_flags;
 };
+
+struct pppolac_opt {
+       __u32           local;
+       __u32           remote;
+       __u32           recv_sequence;
+       __u32           xmit_sequence;
+       atomic_t        sequencing;
+       int             (*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+       __u16           local;
+       __u16           remote;
+       __u32           recv_sequence;
+       __u32           xmit_sequence;
+       void            (*data_ready)(struct sock *sk_raw, int length);
+       int             (*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -51,6 +70,8 @@ struct pppox_sock {
        union {
                struct pppoe_opt pppoe;
                struct pptp_opt  pptp;
+               struct pppolac_opt lac;
+               struct pppopns_opt pns;
        } proto;
        __be16                  num;
 };
index b8b7dc755752d8cfe6337ca92062b196cd19f073..97e3f0926a453c57384b2550aa81df5cf4c9304b 100644 (file)
@@ -36,6 +36,7 @@ struct ipv6_devconf {
        __s32           accept_ra_rt_info_max_plen;
 #endif
 #endif
+       __s32           accept_ra_rt_table;
        __s32           proxy_ndp;
        __s32           accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
index e9ef6d6b51d5b07f6471e96dc20efa5685416220..eabc56d0f8240519d061dbc5053f2f0379caedb0 100644 (file)
@@ -792,4 +792,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
 #endif
 
+/* To identify board information in panic logs, set this */
+extern char *mach_panic_string;
+
 #endif
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644 (file)
index 0000000..08cf540
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <uapi/linux/keychord.h>
+
+#endif /* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/keycombo.h b/include/linux/keycombo.h
new file mode 100644 (file)
index 0000000..c6db262
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * include/linux/keycombo.h - platform data structure for keycombo driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYCOMBO_H
+#define _LINUX_KEYCOMBO_H
+
+#define KEYCOMBO_NAME "keycombo"
+
+/*
+ * If key_down_fn and key_up_fn are both present, key_down_fn is
+ * guaranteed to return before key_up_fn is called, and key_up_fn is
+ * called iff key_down_fn was called.
+ */
+struct keycombo_platform_data {
+       void (*key_down_fn)(void *);
+       void (*key_up_fn)(void *);
+       void *priv;
+       int key_down_delay; /* Time in ms */
+       int *keys_up;
+       int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYCOMBO_H */
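For illustration, a board file would hand this driver its key list as platform data. A minimal sketch, assuming GCC's static initialization of the flexible keys_down[] array; the device and key choices are illustrative, not from the patch:

/* Hypothetical board-file sketch for the keycombo driver. */
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/keycombo.h>
#include <linux/platform_device.h>

static void board_combo_down(void *priv) { pr_info("combo held\n"); }
static void board_combo_up(void *priv) { pr_info("combo released\n"); }

static struct keycombo_platform_data board_keycombo_pdata = {
	.key_down_fn = board_combo_down,
	.key_up_fn = board_combo_up,
	.key_down_delay = 100,				/* ms */
	/* static init of the flexible array is a GCC extension */
	.keys_down = { KEY_VOLUMEDOWN, KEY_POWER, 0 },	/* 0 terminated */
};

static struct platform_device board_keycombo_dev = {
	.name = KEYCOMBO_NAME,
	.dev.platform_data = &board_keycombo_pdata,
};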
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644 (file)
index 0000000..2e34afa
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+       int (*reset_fn)(void);
+       int key_down_delay;
+       int *keys_up;
+       int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
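The keyreset driver consumes the same shape of data; a short sketch (key choice and delay are assumptions, and reset_fn may be left NULL to take the default restart path):

/* Hypothetical board-file sketch for the keyreset driver. */
#include <linux/input.h>
#include <linux/keyreset.h>
#include <linux/platform_device.h>

static struct keyreset_platform_data board_keyreset_pdata = {
	.key_down_delay = 500,				/* ms held before reset */
	/* static init of the flexible array is a GCC extension */
	.keys_down = { KEY_POWER, KEY_VOLUMEDOWN, 0 },	/* 0 terminated */
};

static struct platform_device board_keyreset_dev = {
	.name = KEYRESET_NAME,
	.dev.platform_data = &board_keyreset_pdata,
};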
index a9a48309f0458aa8faf408dc06edf9a623d9c5c4..fc3883852f9e9e58e17f69bf374b37d6fef312c8 100644 (file)
@@ -921,6 +921,7 @@ extern void pagefault_out_of_memory(void);
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 
 extern int can_do_mlock(void);
@@ -1498,7 +1499,7 @@ extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-       struct mempolicy *);
+       struct mempolicy *, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
index 10a9a17342fcd83be68560170b1528262880a8fc..c14df44a95672f5404952a56d3606aa59fa77b72 100644 (file)
@@ -255,6 +255,10 @@ struct vm_area_struct {
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree, or
         * linkage of vma in the address_space->i_mmap_nonlinear list.
+        *
+        * For private anonymous mappings, a pointer to a null terminated string
+        * in the user process containing the name given to the vma, or NULL
+        * if unnamed.
         */
        union {
                struct {
@@ -262,6 +266,7 @@ struct vm_area_struct {
                        unsigned long rb_subtree_last;
                } linear;
                struct list_head nonlinear;
+               const char __user *anon_name;
        } shared;
 
        /*
@@ -465,6 +470,14 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
        return mm->cpu_vm_mask_var;
 }
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_file)
+               return NULL;
+
+       return vma->shared.anon_name;
+}
 #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 /*
  * Memory barriers to keep this state in sync are graciously provided by
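Because the name lives in the owning process's address space, kernel code cannot dereference the returned pointer directly; reading the string itself would need access_remote_vm() or similar against the right mm. A minimal sketch of the contract only:

/* Illustrative only: file-backed VMAs never carry an anon name. */
#include <linux/mm_types.h>
#include <linux/types.h>

static bool vma_is_named_anon(struct vm_area_struct *vma)
{
	return vma_get_anon_name(vma) != NULL;
}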
index e326ae2882a0c5155045a594186a5ded1768444f..66b4659276026e3769d859573007c2d1f06ac294 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/device.h>
 #include <linux/fault-inject.h>
+#include <linux/wakelock.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/pm.h>
@@ -329,12 +330,17 @@ struct mmc_host {
        int                     claim_cnt;      /* "claim" nesting count */
 
        struct delayed_work     detect;
+       struct wake_lock        detect_wake_lock;
        int                     detect_change;  /* card detect flag */
        struct mmc_slot         slot;
 
        const struct mmc_bus_ops *bus_ops;      /* current bus driver */
        unsigned int            bus_refs;       /* reference counter */
 
+       unsigned int            bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME    (1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME     (1 << 1)
+
        unsigned int            sdio_irqs;
        struct task_struct      *sdio_irq_thread;
        bool                    sdio_irq_pending;
@@ -362,6 +368,15 @@ struct mmc_host {
 
        unsigned int            slotno; /* used for sdio acpi binding */
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+       struct {
+               struct sdio_cis                 *cis;
+               struct sdio_cccr                *cccr;
+               struct sdio_embedded_func       *funcs;
+               int                             num_funcs;
+       } embedded_sdio_data;
+#endif
+
        unsigned long           private[0] ____cacheline_aligned;
 };
 
@@ -371,6 +386,14 @@ void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
 void mmc_of_parse(struct mmc_host *host);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+                                      struct sdio_cis *cis,
+                                      struct sdio_cccr *cccr,
+                                      struct sdio_embedded_func *funcs,
+                                      int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
        return (void *)host->private;
@@ -381,6 +404,18 @@ static inline void *mmc_priv(struct mmc_host *host)
 #define mmc_dev(x)     ((x)->parent)
 #define mmc_classdev(x)        (&(x)->class_dev)
 #define mmc_hostname(x)        (dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+       if (manual)
+               host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+       else
+               host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
 
 int mmc_suspend_host(struct mmc_host *);
 int mmc_resume_host(struct mmc_host *);
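A sketch of how a host or card driver might use the deferred-resume policy (the function names here are illustrative, not from the patch):

/* Hypothetical sketch: defer card resume until the first request. */
#include <linux/mmc/host.h>

static void my_enable_deferred_resume(struct mmc_host *host)
{
	mmc_set_bus_resume_policy(host, 1);	/* sets MANUAL_RESUME */
}

static void my_prepare_request(struct mmc_host *host)
{
	if (mmc_bus_needs_resume(host))
		mmc_resume_bus(host);		/* clears NEEDS_RESUME */
}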
index 4a139204c20c0bb8aab7a7759c74e7e5d5cde9a4..6e2d6a135c7e0d75f830af3a429fb5bc02ca07b2 100644 (file)
@@ -26,5 +26,6 @@ typedef unsigned int mmc_pm_flag_t;
 
 #define MMC_PM_KEEP_POWER      (1 << 0)        /* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ   (1 << 1)        /* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY        (1 << 2)        /* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
old mode 100644 (file)
new mode 100755 (executable)
index 50f0bc9..dc680c4
@@ -22,6 +22,14 @@ struct sdio_func;
 
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
+/*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+       uint8_t f_class;
+       uint32_t f_maxblksize;
+};
+
 /*
  * SDIO function CIS tuple (unknown to the core)
  */
@@ -130,6 +138,8 @@ extern int sdio_release_irq(struct sdio_func *func);
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+       unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644 (file)
index 0000000..ca60fbd
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644 (file)
index 0000000..eadc690
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+       XT_QUOTA_INVERT    = 1 << 0,
+       XT_QUOTA_GROW      = 1 << 1,
+       XT_QUOTA_PACKET    = 1 << 2,
+       XT_QUOTA_NO_CHANGE = 1 << 3,
+       XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+       char name[15];
+       u_int8_t flags;
+
+       /* Comparison-invariant */
+       aligned_u64 quota;
+
+       /* Used internally by the kernel */
+       struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
index db50840e6355ece37b36106cb3a9fa6d3939f8d6..c8f8aa0383e51d91d3082b503f45cb0ba8ca9bf1 100644 (file)
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
 #include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
index c9722fdf39c5eb15e88de249db5531757e267499..7c00ee271395a0150311b82ff6a7acda911050f2 100644 (file)
@@ -90,6 +90,27 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
 extern int of_flat_dt_match(unsigned long node, const char *const *matches);
 extern unsigned long of_get_flat_dt_root(void);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs is non-empty
+ *     dt bootargs
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs is empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CONFIG_CMDLINE_FROM_BOOTLOADER=true, dt bootargs is empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
                                     int depth, void *data);
 extern void early_init_dt_check_for_initrd(unsigned long node);
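The precedence table above reduces to a small decision; the following is an illustrative sketch that mirrors the documented outcomes, not the kernel's actual implementation:

/* Sketch only: documented cmdline precedence for early_init_dt_scan_chosen.
 * COMMAND_LINE_SIZE comes from asm/setup.h; CONFIG_CMDLINE is a Kconfig string. */
#include <linux/string.h>

static void sketch_build_cmdline(char *data, const char *dt_bootargs)
{
#if defined(CONFIG_CMDLINE_FORCE)
	strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_EXTEND)
	if (!data[0])
		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	strlcat(data, " ", COMMAND_LINE_SIZE);
	strlcat(data, dt_bootargs, COMMAND_LINE_SIZE);	/* even if empty */
#else /* CONFIG_CMDLINE_FROM_BOOTLOADER */
	if (dt_bootargs[0])
		strlcpy(data, dt_bootargs, COMMAND_LINE_SIZE);
	else if (!data[0])
		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
	/* else leave @data unchanged */
#endif
}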
diff --git a/include/linux/platform_data/ds2482.h b/include/linux/platform_data/ds2482.h
new file mode 100644 (file)
index 0000000..5a6879e
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PLATFORM_DATA_DS2482__
+#define __PLATFORM_DATA_DS2482__
+
+struct ds2482_platform_data {
+       int             slpz_gpio;
+};
+
+#endif /* __PLATFORM_DATA_DS2482__ */
index 3828cefb4f65876e635e5533d24233d32ab6590b..03d921feb09489060c6afabf2648fd34673a4ab1 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/workqueue.h>
 #include <linux/leds.h>
+#include <linux/types.h>
 
 struct device;
 
@@ -140,6 +141,12 @@ enum power_supply_property {
        POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
        POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
        POWER_SUPPLY_PROP_SCOPE,
+       /* Local extensions */
+       POWER_SUPPLY_PROP_USB_HC,
+       POWER_SUPPLY_PROP_USB_OTG,
+       POWER_SUPPLY_PROP_CHARGE_ENABLED,
+       /* Local extensions of type int64_t */
+       POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
        /* Properties of type `const char *' */
        POWER_SUPPLY_PROP_MODEL_NAME,
        POWER_SUPPLY_PROP_MANUFACTURER,
@@ -160,6 +167,7 @@ enum power_supply_type {
 union power_supply_propval {
        int intval;
        const char *strval;
+       int64_t int64val;
 };
 
 struct power_supply {
@@ -194,6 +202,8 @@ struct power_supply {
        /* private */
        struct device *dev;
        struct work_struct changed_work;
+       spinlock_t changed_lock;
+       bool changed;
 #ifdef CONFIG_THERMAL
        struct thermal_zone_device *tzd;
        struct thermal_cooling_device *tcd;
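A driver exposing the 64-bit extension property fills the new union member; a hedged sketch (my_read_charge_counter is an assumed helper, not a real API):

/* Hypothetical driver sketch: report an int64_t charge counter. */
static int my_psy_get_property(struct power_supply *psy,
			       enum power_supply_property psp,
			       union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT:
		val->int64val = my_read_charge_counter();	/* assumed */
		return 0;
	default:
		return -EINVAL;
	}
}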
index 9974975d40dba9ea9ba9fe5179ae9276995c7376..9e370618352a1bd86ccc6ced2d5ae3af0cc371cb 100644 (file)
@@ -67,6 +67,8 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz);
 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
        char *str, size_t len);
 
+void ramoops_console_write_buf(const char *buf, size_t size);
+
 /*
  * Ramoops platform data
  * @mem_size   memory size for ramoops
index 7d5161209082ddb044403ec01125a9303191bc00..a4c8bf7f47039fc143dfaf5bdb0073536592eba1 100644 (file)
@@ -1631,6 +1631,9 @@ static inline cputime_t task_gtime(struct task_struct *t)
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
 /*
  * Per process flags
  */
index 4686491852a70b0665633ee1bce5e44d344b3aa3..17e1888ff505c6fd35b08ce7a42d575fa6b553d3 100644 (file)
@@ -1402,6 +1402,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 struct security_operations {
        char name[SECURITY_NAME_MAX + 1];
 
+       int (*binder_set_context_mgr) (struct task_struct *mgr);
+       int (*binder_transaction) (struct task_struct *from, struct task_struct *to);
+       int (*binder_transfer_binder) (struct task_struct *from, struct task_struct *to);
+       int (*binder_transfer_file) (struct task_struct *from, struct task_struct *to, struct file *file);
+
        int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
        int (*ptrace_traceme) (struct task_struct *parent);
        int (*capget) (struct task_struct *target,
@@ -1690,6 +1695,10 @@ extern void __init security_fixup_ops(struct security_operations *ops);
 
 
 /* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr);
+int security_binder_transaction(struct task_struct *from, struct task_struct *to);
+int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to);
+int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file);
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
 int security_ptrace_traceme(struct task_struct *parent);
 int security_capget(struct task_struct *target,
@@ -1869,6 +1878,26 @@ static inline int security_init(void)
        return 0;
 }
 
+static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+       return 0;
+}
+
+static inline int security_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+       return 0;
+}
+
+static inline int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+       return 0;
+}
+
+static inline int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+       return 0;
+}
+
 static inline int security_ptrace_access_check(struct task_struct *child,
                                             unsigned int mode)
 {
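An LSM opts into these checks through its security_operations table; a minimal permissive sketch (the hook bodies are placeholders, not SELinux's actual policy):

/* Hypothetical LSM sketch: permit all binder operations (return 0). */
static int my_binder_set_context_mgr(struct task_struct *mgr)
{
	return 0;
}

static int my_binder_transaction(struct task_struct *from,
				 struct task_struct *to)
{
	return 0;
}

static struct security_operations my_lsm_ops = {
	.name = "my_lsm",
	.binder_set_context_mgr = my_binder_set_context_mgr,
	.binder_transaction = my_binder_transaction,
	/* .binder_transfer_binder / .binder_transfer_file likewise */
};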
index c2b355fd921abe3c96b431dc5f9c6413ecb308eb..21c074b2200e64ff1de4da77548898b3de351a88 100644 (file)
@@ -60,6 +60,7 @@ struct uart_ops {
        void            (*pm)(struct uart_port *, unsigned int state,
                              unsigned int oldstate);
        int             (*set_wake)(struct uart_port *, unsigned int state);
+       void            (*wake_peer)(struct uart_port *);
 
        /*
         * Return a string describing the type of the port
diff --git a/include/linux/switch.h b/include/linux/switch.h
new file mode 100644 (file)
index 0000000..3e4c748
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ *  Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+       const char      *name;
+       struct device   *dev;
+       int             index;
+       int             state;
+
+       ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+       ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+       const char *name;
+       unsigned        gpio;
+
+       /* if NULL, switch_dev.name will be printed */
+       const char *name_on;
+       const char *name_off;
+       /* if NULL, "0" or "1" will be printed */
+       const char *state_on;
+       const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+       return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
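A driver publishes state by registering a switch_dev and pushing updates with switch_set_state(); a headset-detect sketch ("h2w" is the conventional Android headset switch name, the rest is assumed):

/* Hypothetical sketch: expose headset plug state via the switch class. */
static struct switch_dev headset_sdev = {
	.name = "h2w",
};

static int headset_probe_switch(void)
{
	return switch_dev_register(&headset_sdev);
}

static void headset_plug_event(int plugged)
{
	switch_set_state(&headset_sdev, plugged ? 1 : 0);
}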
diff --git a/include/linux/uid_stat.h b/include/linux/uid_stat.h
new file mode 100644 (file)
index 0000000..6bd6c4e
--- /dev/null
@@ -0,0 +1,29 @@
+/* include/linux/uid_stat.h
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __uid_stat_h
+#define __uid_stat_h
+
+/* Contains definitions for resource tracking per uid. */
+
+#ifdef CONFIG_UID_STAT
+int uid_stat_tcp_snd(uid_t uid, int size);
+int uid_stat_tcp_rcv(uid_t uid, int size);
+#else
+#define uid_stat_tcp_snd(uid, size) do {} while (0)
+#define uid_stat_tcp_rcv(uid, size) do {} while (0)
+#endif
+
+#endif /* __uid_stat_h */
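Call sites in the network stack charge bytes to a uid at send/receive time; a hedged sketch of the pattern (the uid lookup shown is an assumption about the call site, not taken from the patch):

/* Hypothetical call site: charge TCP payload bytes to the current uid. */
#include <linux/cred.h>
#include <linux/uid_stat.h>
#include <linux/user_namespace.h>

static void account_tcp_snd_sketch(int copied)
{
	uid_stat_tcp_snd(from_kuid(&init_user_ns, current_uid()), copied);
}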
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644 (file)
index 0000000..ebe3c4d
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644 (file)
index 0000000..4e84177
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644 (file)
index 0000000..f4a698a
--- /dev/null
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+       WAKE_LOCK_SUSPEND, /* Prevent suspend */
+       WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+       struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+                                 const char *name)
+{
+       wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+       wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+       __pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+       __pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+       __pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+       return lock->ws.active;
+}
+
+#endif /* _LINUX_WAKELOCK_H */
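Usage brackets work that must not race with suspend; a short sketch (the names are arbitrary):

/* Hypothetical sketch: hold a suspend-blocking wakelock across work. */
static struct wake_lock my_lock;

static void my_init(void)
{
	wake_lock_init(&my_lock, WAKE_LOCK_SUSPEND, "my_event");
}

static void my_event_handler(void)
{
	wake_lock(&my_lock);
	/* ... handle the event; the system cannot suspend here ... */
	wake_unlock(&my_lock);
}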
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644 (file)
index 0000000..7ce50f0
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+void log_wakeup_reason(int irq);
+
+#endif /* _LINUX_WAKEUP_REASON_H */
diff --git a/include/linux/wifi_tiwlan.h b/include/linux/wifi_tiwlan.h
new file mode 100644 (file)
index 0000000..f07e067
--- /dev/null
@@ -0,0 +1,27 @@
+/* include/linux/wifi_tiwlan.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WIFI_TIWLAN_H_
+#define _LINUX_WIFI_TIWLAN_H_
+
+#include <linux/wlan_plat.h>
+
+#define WMPA_NUMBER_OF_SECTIONS        3
+#define WMPA_NUMBER_OF_BUFFERS 160
+#define WMPA_SECTION_HEADER    24
+#define WMPA_SECTION_SIZE_0    (WMPA_NUMBER_OF_BUFFERS * 64)
+#define WMPA_SECTION_SIZE_1    (WMPA_NUMBER_OF_BUFFERS * 256)
+#define WMPA_SECTION_SIZE_2    (WMPA_NUMBER_OF_BUFFERS * 2048)
+
+#endif /* _LINUX_WIFI_TIWLAN_H_ */
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644 (file)
index 0000000..40ec348
--- /dev/null
@@ -0,0 +1,27 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+struct wifi_platform_data {
+       int (*set_power)(int val);
+       int (*set_reset)(int val);
+       int (*set_carddetect)(int val);
+       void *(*mem_prealloc)(int section, unsigned long size);
+       int (*get_mac_addr)(unsigned char *buf);
+       void *(*get_country_code)(char *ccode);
+};
+
+#endif /* _LINUX_WLAN_PLAT_H_ */
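A board file populates these hooks with its power-sequencing helpers; a sketch with an assumed GPIO number:

/* Hypothetical board sketch: WLAN power control via a fixed GPIO. */
#include <linux/gpio.h>
#include <linux/wlan_plat.h>

#define BOARD_GPIO_WLAN_PWR	42	/* assumed */

static int board_wifi_set_power(int on)
{
	gpio_set_value(BOARD_GPIO_WLAN_PWR, on);
	return 0;
}

static struct wifi_platform_data board_wifi_pdata = {
	.set_power = board_wifi_set_power,
};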
diff --git a/include/net/activity_stats.h b/include/net/activity_stats.h
new file mode 100644 (file)
index 0000000..10e4c15
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#ifndef __activity_stats_h
+#define __activity_stats_h
+
+#ifdef CONFIG_NET_ACTIVITY_STATS
+void activity_stats_update(void);
+#else
+#define activity_stats_update() do {} while (0)
+#endif
+
+#endif /* __activity_stats_h */
index 25100687babb70119ddf38985363862d8e096987..cfa6b2ebfc0e7e16afad1defae51a5feae4299c0 100644 (file)
@@ -190,6 +190,8 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
 extern void addrconf_prefix_rcv(struct net_device *dev,
                                u8 *opt, int len, bool sllao);
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *     anycast prototypes (anycast.c)
  */
index 0ef00066dae85ec256a3fa815db6aab5d8d6251a..db43501b759909258dadf43195e830f4fa6b3e11 100644 (file)
@@ -199,8 +199,10 @@ enum {
 #define ESCO_2EV5      0x0100
 #define ESCO_3EV5      0x0200
 
-#define SCO_ESCO_MASK  (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
-#define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define SCO_ESCO_MASK  (ESCO_HV1 | ESCO_HV2 | ESCO_HV3)
+#define EDR_ESCO_MASK  (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5)
+#define ALL_ESCO_MASK  (SCO_ESCO_MASK | ESCO_EV3 | ESCO_EV4 | ESCO_EV5 | \
+                       EDR_ESCO_MASK)
 
 /* ACL flags */
 #define ACL_START_NO_FLUSH     0x00
@@ -1629,6 +1631,9 @@ struct hci_conn_info {
        __u8     out;
        __u16    state;
        __u32    link_mode;
+       __u32    mtu;
+       __u32    cnt;
+       __u32    pkts;
 };
 
 struct hci_dev_req {
index 7cb6d360d14702f04bebc0a2ad88a9466b33a607..57123eeb21aaf3ba95f5949aa49f657332e1ca10 100644 (file)
@@ -581,7 +581,8 @@ void hci_disconnect(struct hci_conn *conn, __u8 reason);
 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+                                       __u16 pkt_type, bdaddr_t *dst);
 int hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
@@ -591,7 +592,8 @@ void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+                            __u16 pkt_type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type);
 int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
@@ -654,7 +656,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
                        if (conn->state == BT_CONNECTED) {
                                timeo = conn->disc_timeout;
                                if (!conn->out)
-                                       timeo *= 2;
+                                       timeo *= 20;
                        } else {
                                timeo = msecs_to_jiffies(10);
                        }
index 1e35c43657c85c71560f2010242c4105701b6287..6d1857ab8e5f710926885ebf579db91584b09360 100644 (file)
@@ -37,6 +37,7 @@
 struct sockaddr_sco {
        sa_family_t     sco_family;
        bdaddr_t        sco_bdaddr;
+       __u16           sco_pkt_type;
 };
 
 /* SCO socket options */
@@ -72,7 +73,8 @@ struct sco_conn {
 
 struct sco_pinfo {
        struct bt_sock  bt;
-       __u32           flags;
+       __u16           pkt_type;
+
        struct sco_conn *conn;
 };
 
index 26b5b692c22bc93cbaa42f8cf6b4d9951d49c582..304e41381a1f8f2a2e34f828201016da08b4e876 100644 (file)
@@ -2254,22 +2254,28 @@ struct cfg80211_ops {
  * enum wiphy_flags - wiphy capability flags
  *
  * @WIPHY_FLAG_CUSTOM_REGULATORY:  tells us the driver for this device
- *     has its own custom regulatory domain and cannot identify the
- *     ISO / IEC 3166 alpha2 it belongs to. When this is enabled
- *     we will disregard the first regulatory hint (when the
- *     initiator is %REGDOM_SET_BY_CORE).
- * @WIPHY_FLAG_STRICT_REGULATORY: tells us the driver for this device will
- *     ignore regulatory domain settings until it gets its own regulatory
- *     domain via its regulatory_hint() unless the regulatory hint is
- *     from a country IE. After its gets its own regulatory domain it will
- *     only allow further regulatory domain settings to further enhance
- *     compliance. For example if channel 13 and 14 are disabled by this
- *     regulatory domain no user regulatory domain can enable these channels
- *     at a later time. This can be used for devices which do not have
- *     calibration information guaranteed for frequencies or settings
- *     outside of its regulatory domain. If used in combination with
- *     WIPHY_FLAG_CUSTOM_REGULATORY the inspected country IE power settings
- *     will be followed.
+ *     has its own custom regulatory domain and cannot identify the
+ *     ISO / IEC 3166 alpha2 it belongs to. When this is enabled
+ *     we will disregard the first regulatory hint (when the
+ *     initiator is %REGDOM_SET_BY_CORE). wiphys can set the custom
+ *     regulatory domain using wiphy_apply_custom_regulatory()
+ *     prior to wiphy registration.
+ * @WIPHY_FLAG_STRICT_REGULATORY: tells us that the wiphy for this device
+ *     has regulatory domain that it wishes to be considered as the
+ *     superset for regulatory rules. After this device gets its regulatory
+ *     domain programmed further regulatory hints shall only be considered
+ *     for this device to enhance regulatory compliance, forcing the
+ *     device to only possibly use subsets of the original regulatory
+ *     rules. For example if channel 13 and 14 are disabled by this
+ *     device's regulatory domain no user specified regulatory hint which
+ *     has these channels enabled would enable them for this wiphy,
+ *     the device's original regulatory domain will be trusted as the
+ *     base. You can program the superset of regulatory rules for this
+ *     wiphy with regulatory_hint() for cards programmed with an
+ *     ISO3166-alpha2 country code. wiphys that use regulatory_hint()
+ *     will have their wiphy->regd programmed once the regulatory
+ *     domain is set, and all other regulatory hints will be ignored
+ *     until their own regulatory domain gets programmed.
  * @WIPHY_FLAG_DISABLE_BEACON_HINTS: enable this if your driver needs to ensure
  *     that passive scan flags and beaconing flags may not be lifted by
  *     cfg80211 due to regulatory beacon hints. For more information on beacon
@@ -2466,6 +2472,34 @@ struct wiphy_wowlan_support {
        const struct wiphy_wowlan_tcp_support *tcp;
 };
 
+/**
+ * enum wiphy_vendor_command_flags - validation flags for vendor commands
+ * @WIPHY_VENDOR_CMD_NEED_WDEV: vendor command requires wdev
+ * @WIPHY_VENDOR_CMD_NEED_NETDEV: vendor command requires netdev
+ * @WIPHY_VENDOR_CMD_NEED_RUNNING: interface/wdev must be up & running
+ *     (must be combined with %_WDEV or %_NETDEV)
+ */
+enum wiphy_vendor_command_flags {
+       WIPHY_VENDOR_CMD_NEED_WDEV = BIT(0),
+       WIPHY_VENDOR_CMD_NEED_NETDEV = BIT(1),
+       WIPHY_VENDOR_CMD_NEED_RUNNING = BIT(2),
+};
+
+/**
+ * struct wiphy_vendor_command - vendor command definition
+ * @info: vendor command identifying information, as used in nl80211
+ * @flags: flags, see &enum wiphy_vendor_command_flags
+ * @doit: callback for the operation, note that wdev is %NULL if the
+ *     flags didn't ask for a wdev and non-%NULL otherwise; the data
+ *     pointer may be %NULL if userspace provided no data at all
+ */
+struct wiphy_vendor_command {
+       struct nl80211_vendor_cmd_info info;
+       u32 flags;
+       int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+                   void *data, int data_len);
+};
+
 /**
  * struct wiphy - wireless hardware description
  * @reg_notifier: the driver's regulatory notification callback,
@@ -2573,6 +2607,12 @@ struct wiphy_wowlan_support {
  *     802.11-2012 8.4.2.29 for the defined fields.
  * @extended_capabilities_mask: mask of the valid values
  * @extended_capabilities_len: length of the extended capabilities
+ * @country_ie_pref: country IE processing preferences specified
+ *     by enum nl80211_country_ie_pref
+ * @vendor_commands: array of vendor commands supported by the hardware
+ * @n_vendor_commands: number of vendor commands
+ * @vendor_events: array of vendor events supported by the hardware
+ * @n_vendor_events: number of vendor events
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -2642,6 +2682,8 @@ struct wiphy {
        const u8 *extended_capabilities, *extended_capabilities_mask;
        u8 extended_capabilities_len;
 
+       u8 country_ie_pref;
+
        /* If multiple wiphys are registered and you're handed e.g.
         * a regular netdev with assigned ieee80211_ptr, you won't
         * know whether it points to a wiphy your driver has registered
@@ -2681,6 +2723,10 @@ struct wiphy {
        const struct iw_handler_def *wext;
 #endif
 
+       const struct wiphy_vendor_command *vendor_commands;
+       const struct nl80211_vendor_cmd_info *vendor_events;
+       int n_vendor_commands, n_vendor_events;
+
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3591,6 +3637,121 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy);
  */
 void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
 
+/**
+ * DOC: Vendor commands
+ *
+ * Occasionally, there are special protocol or firmware features that
+ * can't be implemented very openly. For this and similar cases, the
+ * vendor command functionality allows implementing the features with
+ * (typically closed-source) userspace and firmware, using nl80211 as
+ * the configuration mechanism.
+ *
+ * A driver supporting vendor commands must register them as an array
+ * in struct wiphy, with handlers for each one; each command has an
+ * OUI and sub-command ID to identify it.
+ *
+ * Note that this feature should not be (ab)used to implement protocol
+ * features that could openly be shared across drivers. In particular,
+ * it must never be required to use vendor commands to implement any
+ * "normal" functionality that higher-level userspace like connection
+ * managers etc. need.
+ */
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+                                          enum nl80211_commands cmd,
+                                          enum nl80211_attrs attr,
+                                          int approxlen);
+
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+                                          enum nl80211_commands cmd,
+                                          enum nl80211_attrs attr,
+                                          int vendor_event_idx,
+                                          int approxlen, gfp_t gfp);
+
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
+
+/**
+ * cfg80211_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ *     be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NL80211_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute. You
+ * must not modify the skb in any other way.
+ *
+ * When done, call cfg80211_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+       return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
+                                         NL80211_ATTR_TESTDATA, approxlen);
+}
+
+/**
+ * cfg80211_vendor_cmd_reply - send the reply skb
+ * @skb: The skb, must have been allocated with
+ *     cfg80211_vendor_cmd_alloc_reply_skb()
+ *
+ * Since calling this function will usually be the last thing
+ * before returning from the vendor command doit() you should
+ * return the error code.  Note that this function consumes the
+ * skb regardless of the return value.
+ *
+ * Return: An error code or 0 on success.
+ */
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * cfg80211_vendor_event_alloc - allocate vendor-specific event skb
+ * @wiphy: the wiphy
+ * @approxlen: an upper bound of the length of the data that will
+ *     be put into the skb
+ * @event_idx: index of the vendor event in the wiphy's vendor_events
+ * @gfp: allocation flags
+ *
+ * This function allocates and pre-fills an skb for an event on the
+ * vendor-specific multicast group.
+ *
+ * When done filling the skb, call cfg80211_vendor_event() with the
+ * skb to send the event.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+cfg80211_vendor_event_alloc(struct wiphy *wiphy, int approxlen,
+                           int event_idx, gfp_t gfp)
+{
+       return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_VENDOR,
+                                         NL80211_ATTR_VENDOR_DATA,
+                                         event_idx, approxlen, gfp);
+}
+
+/**
+ * cfg80211_vendor_event - send the event
+ * @skb: The skb, must have been allocated with cfg80211_vendor_event_alloc()
+ * @gfp: allocation flags
+ *
+ * This function sends the given @skb, which must have been allocated
+ * by cfg80211_vendor_event_alloc(), as an event. It always consumes it.
+ */
+static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
+{
+       __cfg80211_send_event_skb(skb, gfp);
+}
+
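Putting these together, a driver handler allocates a reply skb, fills it, and returns the status of cfg80211_vendor_cmd_reply(); a sketch where the OUI, subcmd, and attribute number are assumptions:

/* Hypothetical vendor command: return a 32-bit status to userspace. */
static int my_vendor_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
			  void *data, int data_len)
{
	struct sk_buff *skb;
	u32 status = 0;		/* assumed payload */

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(status));
	if (!skb)
		return -ENOMEM;
	if (nla_put_u32(skb, 1 /* assumed attr */, status)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}
	return cfg80211_vendor_cmd_reply(skb);
}

static const struct wiphy_vendor_command my_vendor_cmds[] = {
	{
		.info = { .vendor_id = 0x001234, .subcmd = 1 },	/* assumed */
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV,
		.doit = my_vendor_doit,
	},
};
/* wired up via wiphy->vendor_commands / wiphy->n_vendor_commands */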
 #ifdef CONFIG_NL80211_TESTMODE
 /**
  * DOC: Test mode
@@ -3626,8 +3787,12 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
  *
  * Return: An allocated and pre-filled skb. %NULL if any errors happen.
  */
-struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
-                                                 int approxlen);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy, int approxlen)
+{
+       return __cfg80211_alloc_reply_skb(wiphy, NL80211_CMD_TESTMODE,
+                                         NL80211_ATTR_TESTDATA, approxlen);
+}
 
 /**
  * cfg80211_testmode_reply - send the reply skb
@@ -3641,7 +3806,10 @@ struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
  *
  * Return: An error code or 0 on success.
  */
-int cfg80211_testmode_reply(struct sk_buff *skb);
+static inline int cfg80211_testmode_reply(struct sk_buff *skb)
+{
+       return cfg80211_vendor_cmd_reply(skb);
+}
 
 /**
  * cfg80211_testmode_alloc_event_skb - allocate testmode event
@@ -3664,8 +3832,13 @@ int cfg80211_testmode_reply(struct sk_buff *skb);
  *
  * Return: An allocated and pre-filled skb. %NULL if any errors happen.
  */
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
-                                                 int approxlen, gfp_t gfp);
+static inline struct sk_buff *
+cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
+{
+       return __cfg80211_alloc_event_skb(wiphy, NL80211_CMD_TESTMODE,
+                                         NL80211_ATTR_TESTDATA, -1,
+                                         approxlen, gfp);
+}
 
 /**
  * cfg80211_testmode_event - send the event
@@ -3677,7 +3850,10 @@ struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
  * by cfg80211_testmode_alloc_event_skb(), as an event. It always
  * consumes it.
  */
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp);
+static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+{
+       __cfg80211_send_event_skb(skb, gfp);
+}
 
 #define CFG80211_TESTMODE_CMD(cmd)     .testmode_cmd = (cmd),
 #define CFG80211_TESTMODE_DUMP(cmd)    .testmode_dump = (cmd),
index 7235ae73a1e8d07a905d6803017d341d951e8f56..9528e10fa0b491f6b65c00e4b9ca88483b73e8b2 100644 (file)
@@ -88,6 +88,7 @@ struct inet_request_sock {
                                acked      : 1,
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
+       u32                     ir_mark;
        struct ip_options_rcu   *opt;
 };
 
@@ -96,6 +97,14 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
+static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+{
+       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
+               return skb->mark;
+
+       return sk->sk_mark;
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index 8695359982d1c62b98662787019cc7cd2982ba6e..67aa2d866155726eb85ce3846f2ce3dc0958e13f 100644 (file)
@@ -225,6 +225,9 @@ extern void ipfrag_init(void);
 
 extern void ip_static_sysctl_init(void);
 
+#define IP4_REPLY_MARK(net, mark) \
+       ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
index 087370ff05f11eba38f11c8798e33dc0130bdc57..ac1d532965a27acd5456a8d7cd74a3172733a27f 100644 (file)
@@ -111,6 +111,9 @@ struct frag_hdr {
 
 #define        IP6_MF  0x0001
 
+#define IP6_REPLY_MARK(net, mark) \
+       ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
 #include <net/sock.h>
 
 /* sysctls */
@@ -260,6 +263,12 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 
 extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+                              struct icmp6hdr *thdr, int len);
+
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                     struct sock *sk, struct flowi6 *fl6);
+
 extern int                     ip6_ra_control(struct sock *sk, int sel);
 
 extern int                     ipv6_parse_hopopts(struct sk_buff *skb);
@@ -796,8 +805,7 @@ extern int                  compat_ipv6_getsockopt(struct sock *sk,
 extern int                     ip6_datagram_connect(struct sock *sk, 
                                                     struct sockaddr *addr, int addr_len);
 
-extern int                     ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
-                                               int *addr_len);
+extern int                     ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
 extern int                     ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
                                                 int *addr_len);
 extern void                    ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
index 2ba9de89e8ec778990e8b2ff5183fd3e97eee1aa..0dd6f0b3eadb96119658697e949e50334dcd9434 100644 (file)
@@ -64,6 +64,9 @@ struct netns_ipv4 {
 
        int sysctl_tcp_ecn;
 
+       int sysctl_fwmark_reflect;
+       int sysctl_tcp_fwmark_accept;
+
        kgid_t sysctl_ping_group_range[2];
        long sysctl_tcp_mem[3];
 
index 005e2c2e39a9022bad13f4205343263f1821cb22..4b9f99e3a91c9cfc02f1f48d71ef30f2dec30880 100644 (file)
@@ -28,6 +28,7 @@ struct netns_sysctl_ipv6 {
        int ip6_rt_mtu_expires;
        int ip6_rt_min_advmss;
        int icmpv6_time;
+       int fwmark_reflect;
 };
 
 struct netns_ipv6 {
index 682b5ae9af5165dcc71b4f366c81db7a4df20c01..2db4860e5848def0360395bfb1b2fd0c80b98a47 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _PING_H
 #define _PING_H
 
+#include <net/icmp.h>
 #include <net/netns/hash.h>
 
 /* PING_HTABLE_SIZE must be power of 2 */
  */
 #define GID_T_MAX (((gid_t)~0U) >> 1)
 
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+struct pingv6_ops {
+       int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len);
+       int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg,
+                                    struct sk_buff *skb);
+       int (*icmpv6_err_convert)(u8 type, u8 code, int *err);
+       void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
+                               __be16 port, u32 info, u8 *payload);
+       int (*ipv6_chk_addr)(struct net *net, const struct in6_addr *addr,
+                            const struct net_device *dev, int strict);
+};
+
 struct ping_table {
        struct hlist_nulls_head hash[PING_HTABLE_SIZE];
        rwlock_t                lock;
@@ -39,10 +52,39 @@ struct ping_iter_state {
 };
 
 extern struct proto ping_prot;
+extern struct ping_table ping_table;
+#if IS_ENABLED(CONFIG_IPV6)
+extern struct pingv6_ops pingv6_ops;
+#endif
 
+struct pingfakehdr {
+       struct icmphdr icmph;
+       struct iovec *iov;
+       sa_family_t family;
+       __wsum wcheck;
+};
 
-extern void ping_rcv(struct sk_buff *);
-extern void ping_err(struct sk_buff *, u32 info);
+int  ping_get_port(struct sock *sk, unsigned short ident);
+void ping_hash(struct sock *sk);
+void ping_unhash(struct sock *sk);
+
+int  ping_init_sock(struct sock *sk);
+void ping_close(struct sock *sk, long timeout);
+int  ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+void ping_err(struct sk_buff *skb, int offset, u32 info);
+int  ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
+                 struct sk_buff *);
+
+int  ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len, int noblock, int flags, int *addr_len);
+int  ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+                        void *user_icmph, size_t icmph_len);
+int  ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                    size_t len);
+int  ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                    size_t len);
+int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void ping_rcv(struct sk_buff *skb);
 
 #ifdef CONFIG_PROC_FS
 extern int __init ping_proc_init(void);
@@ -50,6 +92,7 @@ extern void ping_proc_exit(void);
 #endif
 
 void __init ping_init(void);
-
+int  __init pingv6_init(void);
+void pingv6_exit(void);
 
 #endif /* _PING_H */
index 6f87f0873843d3fd65933c7f05e5a51c66103a12..13f12c10f03b84c68c77c5cc178e6f323e0c6346 100644 (file)
@@ -288,6 +288,7 @@ extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
 extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_default_init_rwnd;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -1552,6 +1553,8 @@ extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
 extern int tcp_gro_complete(struct sk_buff *skb);
 extern int tcp4_gro_complete(struct sk_buff *skb);
 
+extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);
+
 #ifdef CONFIG_PROC_FS
 extern int tcp4_proc_init(void);
 extern void tcp4_proc_exit(void);
index 938b7fd1120477213888f9c7e61f98039bea3e03..eb40e71ff2ee321d166baa5e67f9b173daf517c1 100644 (file)
@@ -11,6 +11,7 @@ extern struct proto rawv6_prot;
 extern struct proto udpv6_prot;
 extern struct proto udplitev6_prot;
 extern struct proto tcpv6_prot;
+extern struct proto pingv6_prot;
 
 struct flowi6;
 
@@ -21,6 +22,8 @@ extern int                            ipv6_frag_init(void);
 extern void                            ipv6_frag_exit(void);
 
 /* transport protocols */
+extern int                             pingv6_init(void);
+extern void                            pingv6_exit(void);
 extern int                             rawv6_init(void);
 extern void                            rawv6_exit(void);
 extern int                             udpv6_init(void);
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644 (file)
index 0000000..951e6ca
--- /dev/null
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+                unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq),
+
+       TP_STRUCT__entry(
+           __field(          u32, cpu_id    )
+           __field(unsigned long, targfreq   )
+           __field(unsigned long, actualfreq )
+          ),
+
+       TP_fast_assign(
+           __entry->cpu_id = (u32) cpu_id;
+           __entry->targfreq = targfreq;
+           __entry->actualfreq = actualfreq;
+       ),
+
+       TP_printk("cpu=%u targ=%lu actual=%lu",
+             __entry->cpu_id, __entry->targfreq,
+             __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+       TP_PROTO(u32 cpu_id, unsigned long targfreq,
+            unsigned long actualfreq),
+       TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+                   TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+           TP_STRUCT__entry(
+                   __field(unsigned long, cpu_id    )
+                   __field(unsigned long, load      )
+                   __field(unsigned long, curtarg   )
+                   __field(unsigned long, curactual )
+                   __field(unsigned long, newtarg   )
+           ),
+
+           TP_fast_assign(
+                   __entry->cpu_id = cpu_id;
+                   __entry->load = load;
+                   __entry->curtarg = curtarg;
+                   __entry->curactual = curactual;
+                   __entry->newtarg = newtarg;
+           ),
+
+           TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+                     __entry->cpu_id, __entry->load, __entry->curtarg,
+                     __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+           TP_PROTO(unsigned long cpu_id, unsigned long load,
+                    unsigned long curtarg, unsigned long curactual,
+                    unsigned long newtarg),
+           TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+           TP_PROTO(const char *s),
+           TP_ARGS(s),
+           TP_STRUCT__entry(
+                   __string(s, s)
+           ),
+           TP_fast_assign(
+                   __assign_str(s, s);
+           ),
+           TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
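
As a usage illustration (not part of the patch itself): a governor built
against this header would emit these events from its speed-setting path.
A minimal sketch, following the usual tracepoint build convention; the
helper name is hypothetical:

	/* Instantiate the tracepoints in exactly one .c file. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/cpufreq_interactive.h>

	static void example_report_setspeed(u32 cpu, unsigned long target,
					    unsigned long actual)
	{
		/* emitted after the frequency transition succeeds */
		trace_cpufreq_interactive_setspeed(cpu, target, actual);
	}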
diff --git a/include/trace/events/gpu.h b/include/trace/events/gpu.h
new file mode 100644 (file)
index 0000000..7e15cdf
--- /dev/null
@@ -0,0 +1,143 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu
+
+#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+       ({ \
+               u64 t = ns + (NSEC_PER_USEC / 2); \
+               do_div(t, NSEC_PER_SEC); \
+               t; \
+       })
+
+#define show_usecs_from_ns(ns) \
+       ({ \
+               u64 t = ns + (NSEC_PER_USEC / 2); \
+               u32 rem; \
+               do_div(t, NSEC_PER_USEC); \
+               rem = do_div(t, USEC_PER_SEC); \
+       })
+
+/*
+ * The gpu_sched_switch event indicates that a switch from one GPU context to
+ * another occurred on one of the GPU hardware blocks.
+ *
+ * The gpu_name argument identifies the GPU hardware block.  Each independently
+ * scheduled GPU hardware block should have a different name.  This may be used
+ * in different ways for different GPUs.  For example, if a GPU includes
+ * multiple processing cores it may use names "GPU 0", "GPU 1", etc.  If a GPU
+ * includes a separately scheduled 2D and 3D hardware block, it might use the
+ * names "2D" and "3D".
+ *
+ * The timestamp argument is the timestamp at which the switch occurred on the
+ * GPU. These timestamps are in units of nanoseconds and must use
+ * approximately the same time as sched_clock, though they need not come from
+ * any CPU clock. The timestamps for a single hardware block must be
+ * monotonically nondecreasing.  This means that if a variable compensation
+ * offset is used to translate from some other clock to the sched_clock, then
+ * care must be taken when increasing that offset, and doing so may result in
+ * multiple events with the same timestamp.
+ *
+ * The next_ctx_id argument identifies the next context that was running on
+ * the GPU hardware block.  A value of 0 indicates that the hardware block
+ * will be idle.
+ *
+ * The next_prio argument indicates the priority of the next context at the
+ * time of the event.  The exact numeric values may mean different things for
+ * different GPUs, but they should follow the rule that lower values indicate a
+ * higher priority.
+ *
+ * The next_job_id argument identifies the batch of work that the GPU will be
+ * working on.  This should correspond to a job_id that was previously traced
+ * as a gpu_job_enqueue event when the batch of work was created.
+ */
+TRACE_EVENT(gpu_sched_switch,
+
+       TP_PROTO(const char *gpu_name, u64 timestamp,
+               u32 next_ctx_id, s32 next_prio, u32 next_job_id),
+
+       TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
+
+       TP_STRUCT__entry(
+               __string(       gpu_name,       gpu_name        )
+               __field(        u64,            timestamp       )
+               __field(        u32,            next_ctx_id     )
+               __field(        s32,            next_prio       )
+               __field(        u32,            next_job_id     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(gpu_name, gpu_name);
+               __entry->timestamp = timestamp;
+               __entry->next_ctx_id = next_ctx_id;
+               __entry->next_prio = next_prio;
+               __entry->next_job_id = next_job_id;
+       ),
+
+       TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
+               "next_job_id=%lu",
+               __get_str(gpu_name),
+               (unsigned long long)show_secs_from_ns(__entry->timestamp),
+               (unsigned long)show_usecs_from_ns(__entry->timestamp),
+               (unsigned long)__entry->next_ctx_id,
+               (long)__entry->next_prio,
+               (unsigned long)__entry->next_job_id)
+);
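
To make the contract above concrete (an illustration only; the block
name and helper are hypothetical): a driver's scheduling path would emit
the event once per hardware context switch, passing 0 as next_ctx_id
when the block goes idle:

	static void example_on_hw_switch(u64 gpu_ts, u32 next_ctx,
					 s32 next_prio, u32 next_job)
	{
		/* "3D" identifies the independently scheduled block */
		trace_gpu_sched_switch("3D", gpu_ts, next_ctx,
				       next_prio, next_job);
	}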
+
+/*
+ * The gpu_job_enqueue event indicates that a batch of work has been queued up
+ * to be processed by the GPU.  This event is not intended to indicate that
+ * the batch of work has been submitted to the GPU hardware, but rather that
+ * it has been submitted to the GPU kernel driver.
+ *
+ * This event should be traced on the thread that initiated the work being
+ * queued.  For example, if a batch of work is submitted to the kernel by a
+ * userland thread, the event should be traced on that thread.
+ *
+ * The ctx_id field identifies the GPU context in which the batch of work
+ * being queued is to be run.
+ *
+ * The job_id field identifies the batch of work being queued within the given
+ * GPU context.  The first batch of work submitted for a given GPU context
+ * should have a job_id of 0, and each subsequent batch of work should
+ * increment the job_id by 1.
+ *
+ * The type field identifies the type of the job being enqueued.  The job
+ * types may be different for different GPU hardware.  For example, a GPU may
+ * differentiate between "2D", "3D", and "compute" jobs.
+ */
+TRACE_EVENT(gpu_job_enqueue,
+
+       TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
+
+       TP_ARGS(ctx_id, job_id, type),
+
+       TP_STRUCT__entry(
+               __field(        u32,            ctx_id          )
+               __field(        u32,            job_id          )
+               __string(       type,           type            )
+       ),
+
+       TP_fast_assign(
+               __entry->ctx_id = ctx_id;
+               __entry->job_id = job_id;
+               __assign_str(type, type);
+       ),
+
+       TP_printk("ctx_id=%lu job_id=%lu type=%s",
+               (unsigned long)__entry->ctx_id,
+               (unsigned long)__entry->job_id,
+               __get_str(type))
+);
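
A matching enqueue-side sketch (hypothetical helper; per the comment
above it must run on the submitting thread, with job_id counting up
from 0 within each context):

	static void example_on_submit(u32 ctx_id, u32 *job_counter)
	{
		trace_gpu_job_enqueue(ctx_id, (*job_counter)++, "3D");
	}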
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644 (file)
index 0000000..82b368d
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/tracepoint.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+
+/*
+ * Unconditional logging of mmc block erase operations,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_erase_class,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size),
+       TP_STRUCT__entry(
+               __field(unsigned int, cmd)
+               __field(unsigned int, addr)
+               __field(unsigned int, size)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+               __entry->addr = addr;
+               __entry->size = size;
+       ),
+       TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+                 __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_start,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size));
+
+DEFINE_EVENT(mmc_blk_erase_class, mmc_blk_erase_end,
+       TP_PROTO(unsigned int cmd, unsigned int addr, unsigned int size),
+       TP_ARGS(cmd, addr, size));
+
+/*
+ * Logging of start of read or write mmc block operation,
+ * including cmd, address, size
+ */
+DECLARE_EVENT_CLASS(mmc_blk_rw_class,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_STRUCT__entry(
+               __field(unsigned int, cmd)
+               __field(unsigned int, addr)
+               __field(unsigned int, size)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+               __entry->addr = addr;
+               __entry->size = data->blocks;
+       ),
+       TP_printk("cmd=%u,addr=0x%08x,size=0x%08x",
+                 __entry->cmd, __entry->addr, __entry->size)
+);
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_start,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+                     (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+                     data));
+
+DEFINE_EVENT_CONDITION(mmc_blk_rw_class, mmc_blk_rw_end,
+       TP_PROTO(unsigned int cmd, unsigned int addr, struct mmc_data *data),
+       TP_ARGS(cmd, addr, data),
+       TP_CONDITION(((cmd == MMC_READ_MULTIPLE_BLOCK) ||
+                     (cmd == MMC_WRITE_MULTIPLE_BLOCK)) &&
+                     data));
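
Because of the TP_CONDITION above, a call site may trace unconditionally
and the event still fires only for multi-block reads and writes with a
non-NULL data descriptor. A hedged sketch of such a call site (the field
names follow struct mmc_request, but the placement is illustrative):

	/* mrq: the request about to be issued to the host controller */
	trace_mmc_blk_rw_start(mrq->cmd->opcode, mrq->cmd->arg, mrq->data);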
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 427acab5d69af24899c4daf19b194e4abd67a243..503fc2b870b97d8b6c9e28a626a0dceb599d5f93 100644 (file)
@@ -146,6 +146,25 @@ DEFINE_EVENT(clock, clock_set_rate,
        TP_ARGS(name, state, cpu_id)
 );
 
+TRACE_EVENT(clock_set_parent,
+
+       TP_PROTO(const char *name, const char *parent_name),
+
+       TP_ARGS(name, parent_name),
+
+       TP_STRUCT__entry(
+               __string(       name,           name            )
+               __string(       parent_name,    parent_name     )
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __assign_str(parent_name, parent_name);
+       ),
+
+       TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
 /*
  * The power domain events are used for power domains transitions
  */
index 2c267bcbb85c1c4e31b676127db7686d8d027e09..bc81fb2e1f0e19d363d3c8095c85b971c521d12a 100644 (file)
@@ -61,5 +61,16 @@ struct epoll_event {
        __u64 data;
 } EPOLL_PACKED;
 
-
+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+       if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+               epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+       epev->events &= ~EPOLLWAKEUP;
+}
+#endif
 #endif /* _UAPI_LINUX_EVENTPOLL_H */
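
The helper above makes EPOLLWAKEUP best-effort from userspace: the
kernel silently drops the flag for callers without CAP_BLOCK_SUSPEND,
and always when CONFIG_PM_SLEEP is off. A userspace sketch, assuming a
libc that exposes EPOLLWAKEUP and an already-created epfd/fd pair:

	#include <stdio.h>
	#include <sys/epoll.h>

	static void watch_wakeup_capable(int epfd, int fd)
	{
		struct epoll_event ev = {
			.events = EPOLLIN | EPOLLWAKEUP, /* may be downgraded */
			.data = { .fd = fd },
		};

		if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0)
			perror("epoll_ctl");
	}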
diff --git a/include/uapi/linux/if_pppolac.h b/include/uapi/linux/if_pppolac.h
new file mode 100644 (file)
index 0000000..b7eb815
--- /dev/null
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OLAC */
+       int             udp_socket;
+       struct __attribute__((packed)) {
+               __u16   tunnel, session;
+       } local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
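
A userspace sketch of how this address is consumed (all IDs are
hypothetical, and tunnel/session values must be in whatever byte order
the driver expects on the wire): create a PPPoX socket and connect() it
over an already-established UDP tunnel socket. The flow for
sockaddr_pppopns below is analogous, with a TCP socket and
PX_PROTO_OPNS:

	#include <sys/socket.h>
	#include <linux/if_pppox.h>

	static int example_pppolac_attach(int udp_fd)
	{
		struct sockaddr_pppolac addr = {
			.sa_family = AF_PPPOX,
			.sa_protocol = PX_PROTO_OLAC,
			.udp_socket = udp_fd,
			.local = { .tunnel = 1, .session = 1 },
			.remote = { .tunnel = 2, .session = 1 },
		};
		int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

		if (fd < 0 ||
		    connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return -1;
		return fd;
	}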
diff --git a/include/uapi/linux/if_pppopns.h b/include/uapi/linux/if_pppopns.h
new file mode 100644 (file)
index 0000000..a392b52
--- /dev/null
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OPNS */
+       int             tcp_socket;
+       __u16           local;
+       __u16           remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
index e36a4aecd3117ebdaf46c17ac115e5d0a29bcede..87f478b0ca6f0447e92f38cfe8994262307457ac 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 
 /* For user-space programs to pick up these definitions
  * which they wouldn't get otherwise without defining __KERNEL__
@@ -56,7 +58,9 @@ struct pptp_addr {
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
        __kernel_sa_family_t sa_family;       /* address family, AF_PPPOX */
index 4649ee35b6052e7ef967b6323003344879774826..a86f5301c609dc025a6f32d840f3ca80f7c19624 100644 (file)
@@ -153,6 +153,9 @@ struct input_keymap_entry {
 
 #define EVIOCGRAB              _IOW('E', 0x90, int)                    /* Grab/Release device */
 
+#define EVIOCGSUSPENDBLOCK     _IOR('E', 0x91, int)                    /* get suspend block enable */
+#define EVIOCSSUSPENDBLOCK     _IOW('E', 0x91, int)                    /* set suspend block enable */
+
 #define EVIOCSCLOCKID          _IOW('E', 0xa0, int)                    /* Set clockid to be used for timestamps */
 
 /*
@@ -458,10 +461,14 @@ struct input_keymap_entry {
 #define KEY_VIDEO_NEXT         241     /* drive next video source */
 #define KEY_VIDEO_PREV         242     /* drive previous video source */
 #define KEY_BRIGHTNESS_CYCLE   243     /* brightness up, after max is min */
-#define KEY_BRIGHTNESS_ZERO    244     /* brightness off, use ambient */
+#define KEY_BRIGHTNESS_AUTO    244     /* Set Auto Brightness: manual
+                                         brightness control is off,
+                                         rely on ambient */
+#define KEY_BRIGHTNESS_ZERO    KEY_BRIGHTNESS_AUTO
 #define KEY_DISPLAY_OFF                245     /* display device to off state */
 
-#define KEY_WIMAX              246
+#define KEY_WWAN               246     /* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX              KEY_WWAN
 #define KEY_RFKILL             247     /* Key that controls all radios */
 
 #define KEY_MICMUTE            248     /* Mute / unmute the microphone */
@@ -506,11 +513,15 @@ struct input_keymap_entry {
 #define BTN_DEAD               0x12f
 
 #define BTN_GAMEPAD            0x130
-#define BTN_A                  0x130
-#define BTN_B                  0x131
+#define BTN_SOUTH              0x130
+#define BTN_A                  BTN_SOUTH
+#define BTN_EAST               0x131
+#define BTN_B                  BTN_EAST
 #define BTN_C                  0x132
-#define BTN_X                  0x133
-#define BTN_Y                  0x134
+#define BTN_NORTH              0x133
+#define BTN_X                  BTN_NORTH
+#define BTN_WEST               0x134
+#define BTN_Y                  BTN_WEST
 #define BTN_Z                  0x135
 #define BTN_TL                 0x136
 #define BTN_TR                 0x137
@@ -623,6 +634,7 @@ struct input_keymap_entry {
 #define KEY_ADDRESSBOOK                0x1ad   /* AL Contacts/Address Book */
 #define KEY_MESSENGER          0x1ae   /* AL Instant Messaging */
 #define KEY_DISPLAYTOGGLE      0x1af   /* Turn display (LCD) on and off */
+#define KEY_BRIGHTNESS_TOGGLE  KEY_DISPLAYTOGGLE
 #define KEY_SPELLCHECK         0x1b0   /* AL Spell Check */
 #define KEY_LOGOFF             0x1b1   /* AL Logoff */
 
@@ -707,6 +719,24 @@ struct input_keymap_entry {
 #define KEY_ATTENDANT_TOGGLE   0x21d   /* Attendant call on or off */
 #define KEY_LIGHTS_TOGGLE      0x21e   /* Reading light on or off */
 
+#define BTN_DPAD_UP            0x220
+#define BTN_DPAD_DOWN          0x221
+#define BTN_DPAD_LEFT          0x222
+#define BTN_DPAD_RIGHT         0x223
+
+#define KEY_ALS_TOGGLE         0x230   /* Ambient light sensor */
+
+#define KEY_BUTTONCONFIG               0x240   /* AL Button Configuration */
+#define KEY_TASKMANAGER                0x241   /* AL Task/Project Manager */
+#define KEY_JOURNAL            0x242   /* AL Log/Journal/Timecard */
+#define KEY_CONTROLPANEL               0x243   /* AL Control Panel */
+#define KEY_APPSELECT          0x244   /* AL Select Task/Application */
+#define KEY_SCREENSAVER                0x245   /* AL Screen Saver */
+#define KEY_VOICECOMMAND               0x246   /* Listening Voice Command */
+
+#define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
+#define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
+
 #define BTN_TRIGGER_HAPPY              0x2c0
 #define BTN_TRIGGER_HAPPY1             0x2c0
 #define BTN_TRIGGER_HAPPY2             0x2c1
@@ -844,6 +874,7 @@ struct input_keymap_entry {
 #define SW_FRONT_PROXIMITY     0x0b  /* set = front proximity sensor active */
 #define SW_ROTATE_LOCK         0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
+#define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
 #define SW_MAX                 0x0f
 #define SW_CNT                 (SW_MAX+1)
 
index 4bda4cf5b0f56d84651497df86bd9fa09909fa85..4214fac1bf4fbeea0c702c440f39aaa6cad94291 100644 (file)
@@ -160,6 +160,7 @@ enum {
        DEVCONF_ACCEPT_DAD,
        DEVCONF_FORCE_TLLAO,
        DEVCONF_NDISC_NOTIFY,
+       DEVCONF_ACCEPT_RA_RT_TABLE,
        DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
new file mode 100644 (file)
index 0000000..ea7cf4d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_KEYCHORD_H_
+#define _UAPI_LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION               1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+       /* should be KEYCHORD_VERSION */
+       __u16 version;
+       /*
+        * client specified ID, returned from read()
+        * when this keychord is pressed.
+        */
+       __u16 id;
+
+       /* number of keycodes in this keychord */
+       __u16 count;
+
+       /* variable length array of keycodes */
+       __u16 keycodes[];
+};
+
+#endif /* _UAPI_LINUX_KEYCHORD_H_ */
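
A userspace sketch of the flow described above (the keycodes are
illustrative): since struct input_keychord is three __u16 header fields
followed by `count' keycodes, a flat __u16 array has the same layout:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/keychord.h>

	static void wait_for_chord(void)
	{
		__u16 req[] = { KEYCHORD_VERSION, 1 /* id */, 2 /* count */,
				KEY_VOLUMEDOWN, KEY_POWER };
		__u16 id;
		int fd = open("/dev/keychord", O_RDWR);

		if (fd >= 0 &&
		    write(fd, req, sizeof(req)) == (ssize_t)sizeof(req) &&
		    read(fd, &id, sizeof(id)) == (ssize_t)sizeof(id))
			printf("keychord %u pressed\n", id);
	}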
index f055e58b31473f76f946d0b03b0c6f9444e79519..db4ae0cd16c7f2d7ffa9b009be9a1f08a991632c 100644 (file)
@@ -104,6 +104,7 @@ struct __fat_dirent {
 /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */
 #define FAT_IOCTL_GET_ATTRIBUTES       _IOR('r', 0x10, __u32)
 #define FAT_IOCTL_SET_ATTRIBUTES       _IOW('r', 0x11, __u32)
+#define VFAT_IOCTL_GET_VOLUME_ID       _IOR('r', 0x12, __u32)
 
 struct fat_boot_sector {
        __u8    ignored[3];     /* Boot strap short or near jump */
@@ -161,6 +162,17 @@ struct fat_boot_fsinfo {
        __le32   reserved2[4];
 };
 
+struct fat_boot_bsx {
+       __u8     drive;         /* drive number */
+       __u8     reserved1;
+       __u8     signature;     /* extended boot signature */
+       __u8     vol_id[4];     /* volume ID */
+       __u8     vol_label[11]; /* volume label */
+       __u8     type[8];       /* file system type */
+};
+#define FAT16_BSX_OFFSET 36 /* offset of fat_boot_bsx in FAT12 and FAT16 */
+#define FAT32_BSX_OFFSET 64 /* offset of fat_boot_bsx in FAT32 */
+
 struct msdos_dir_entry {
        __u8    name[MSDOS_NAME];/* name and extension */
        __u8    attr;           /* attribute bits */
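
A userspace sketch of the new ioctl (the mount point is hypothetical):
the returned value matches the vol_id bytes of struct fat_boot_bsx at
the FAT16/FAT32 offsets defined above:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/msdos_fs.h>

	static void print_volume_id(void)
	{
		__u32 vol_id;
		int fd = open("/mnt/vfat", O_RDONLY);

		if (fd >= 0 &&
		    ioctl(fd, VFAT_IOCTL_GET_VOLUME_ID, &vol_id) == 0)
			printf("volume id: %08X\n", vol_id);
	}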
index 208ae938733143ce0ba2117378423d443a3d8312..faaa28b3d0613e9ff68618f0fa89db80158517e9 100644 (file)
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
        __u32 timeout;
 
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
+       /* Use netlink messages for notification in addition to sysfs */
+       __u8 send_nl_msg;
+
        /* for kernel module internal use only */
        struct idletimer_tg *timer __attribute__((aligned(8)));
 };
index 26d7217bd4f1cf1337a7c1d5d5643801d23b4e8a..63594564831c4b26f8e793d6d9b52c18406dc79c 100644 (file)
@@ -11,4 +11,10 @@ struct xt_socket_mtinfo1 {
        __u8 flags;
 };
 
+void xt_socket_put_sk(struct sock *sk);
+struct sock *xt_socket_get4_sk(const struct sk_buff *skb,
+                              struct xt_action_param *par);
+struct sock *xt_socket_get6_sk(const struct sk_buff *skb,
+                              struct xt_action_param *par);
+
 #endif /* _XT_SOCKET_H */
index d1e48b5e348fa829d5b05ca9d1886bf2c077c4d6..11c8c1707d2347d9c6c93a262542967662ee8ff0 100644 (file)
  * @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can
  *     return back to normal.
  *
+ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules.
+ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
+ *
+ * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
+ *     new channel information (Channel Switch Announcement - CSA)
+ *     in the beacon for some time (as defined in the
+ *     %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
+ *     new channel. Userspace provides the new channel information (using
+ *     %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel
+ *     width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform
+ *     other stations that transmission must be blocked until the channel
+ *     switch is complete.
+ *
+ * @NL80211_CMD_VENDOR: Vendor-specified command/event. The command is specified
+ *     by the %NL80211_ATTR_VENDOR_ID attribute and a sub-command in
+ *     %NL80211_ATTR_VENDOR_SUBCMD. Parameter(s) can be transported in
+ *     %NL80211_ATTR_VENDOR_DATA.
+ *     For feature advertisement, the %NL80211_ATTR_VENDOR_DATA attribute is
+ *     used in the wiphy data as a nested attribute containing descriptions
+ *     (&struct nl80211_vendor_cmd_info) of the supported vendor commands.
+ *     This may also be sent as an event with the same attributes.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -808,6 +830,13 @@ enum nl80211_commands {
        NL80211_CMD_CRIT_PROTOCOL_START,
        NL80211_CMD_CRIT_PROTOCOL_STOP,
 
+       NL80211_CMD_GET_COALESCE,
+       NL80211_CMD_SET_COALESCE,
+
+       NL80211_CMD_CHANNEL_SWITCH,
+
+       NL80211_CMD_VENDOR,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1429,6 +1458,48 @@ enum nl80211_commands {
  * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which
  *      the connection should have increased reliability (u16).
  *
+ * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16).
+ *     This is similar to @NL80211_ATTR_STA_AID but, unlike that attribute,
+ *     it may be used with the first @NL80211_CMD_SET_STATION command to
+ *     update a TDLS peer STA entry.
+ *
+ * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
+ *
+ * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTT's
+ *     until the channel switch event.
+ * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
+ *     must be blocked on the current channel (before the channel switch
+ *     operation).
+ * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
+ *     for the time while performing a channel switch.
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
+ *     field in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
+ *     field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ *
+ * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
+ *     As specified in the &enum nl80211_rxmgmt_flags.
+ *
+ * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels.
+ *
+ * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported
+ *      operating classes.
+ *
+ * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space
+ *     controls DFS operation in IBSS mode. If the flag is included in
+ *     %NL80211_CMD_JOIN_IBSS request, the driver will allow use of DFS
+ *     channels and reports radar events to userspace. Userspace is required
+ *     to react to radar events, e.g. initiate a channel switch or leave the
+ *     IBSS network.
+ *
+ * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
+ *     %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
+ * @NL80211_ATTR_VENDOR_SUBCMD: vendor sub-command
+ * @NL80211_ATTR_VENDOR_DATA: data for the vendor command, if any; this
+ *     attribute is also used for vendor command feature advertisement
+ * @NL80211_ATTR_VENDOR_EVENTS: used for event list advertising in the wiphy
+ *     info, containing a nested array of possible events
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1727,6 +1798,35 @@ enum nl80211_attrs {
        NL80211_ATTR_CRIT_PROT_ID,
        NL80211_ATTR_MAX_CRIT_PROT_DURATION,
 
+       NL80211_ATTR_PEER_AID,
+
+       NL80211_ATTR_COALESCE_RULE,
+
+       NL80211_ATTR_CH_SWITCH_COUNT,
+       NL80211_ATTR_CH_SWITCH_BLOCK_TX,
+       NL80211_ATTR_CSA_IES,
+       NL80211_ATTR_CSA_C_OFF_BEACON,
+       NL80211_ATTR_CSA_C_OFF_PRESP,
+
+       NL80211_ATTR_RXMGMT_FLAGS,
+
+       NL80211_ATTR_STA_SUPPORTED_CHANNELS,
+
+       NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES,
+
+       NL80211_ATTR_HANDLE_DFS,
+
+       NL80211_ATTR_SUPPORT_5_MHZ,
+       NL80211_ATTR_SUPPORT_10_MHZ,
+
+       NL80211_ATTR_OPMODE_NOTIF,
+
+       NL80211_ATTR_VENDOR_ID,
+       NL80211_ATTR_VENDOR_SUBCMD,
+       NL80211_ATTR_VENDOR_DATA,
+
+       NL80211_ATTR_VENDOR_EVENTS,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2278,9 +2378,15 @@ enum nl80211_reg_rule_attr {
  * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
  * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
- * only report BSS with matching SSID.
+ *     only report BSS with matching SSID.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
- *     BSS in scan results. Filtering is turned off if not specified.
+ *     BSS in scan results. Filtering is turned off if not specified. Note that
+ *     if this attribute is in a match set of its own, then it is treated as
+ *     the default value for all matchsets with an SSID, rather than being a
+ *     matchset of its own without an RSSI filter. This is due to problems with
+ *     how this API was implemented in the past. Also, due to the same problem,
+ *     the only way to create a matchset with only an RSSI filter (with this
+ *     attribute) is if there's only a single matchset with the RSSI attribute.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *     attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3721,4 +3827,24 @@ enum nl80211_crit_proto_id {
 /* maximum duration for critical protocol measures */
 #define NL80211_CRIT_PROTO_MAX_DURATION                5000 /* msec */
 
+/*
+ * If this flag is unset, the lower 24 bits are an OUI, if set
+ * a Linux nl80211 vendor ID is used (no such IDs are allocated
+ * yet, so that's not valid so far)
+ */
+#define NL80211_VENDOR_ID_IS_LINUX     0x80000000
+
+/**
+ * struct nl80211_vendor_cmd_info - vendor command data
+ * @vendor_id: If the %NL80211_VENDOR_ID_IS_LINUX flag is clear, then the
+ *     value is a 24-bit OUI; if it is set then a separately allocated ID
+ *     may be used, but no such IDs are allocated yet. New IDs should be
+ *     added to this file when needed.
+ * @subcmd: sub-command ID for the command
+ */
+struct nl80211_vendor_cmd_info {
+       __u32 vendor_id;
+       __u32 subcmd;
+};
+
 #endif /* __LINUX_NL80211_H */
index 289760f424aaa3247d2e4f66841334c67295c9ef..28bb0b3a08bf99b20b64500ee9ef0a824b4f2707 100644 (file)
 
 #define PR_GET_TID_ADDRESS     40
 
+/* Sets the timerslack for arbitrary threads
+ * arg2 slack value, 0 means "use default"
+ * arg3 pid of the thread whose timer slack needs to be set
+ */
+#define PR_SET_TIMERSLACK_PID 41
+
+#define PR_SET_VMA             0x53564d41
+# define PR_SET_VMA_ANON_NAME          0
+
 #endif /* _LINUX_PRCTL_H */
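
A userspace sketch of the new option (tid is hypothetical, and the
constant comes from this patched <linux/prctl.h>, not from glibc):

	#include <sys/prctl.h>

	prctl(PR_SET_TIMERSLACK_PID, 50000UL, tid); /* 50us, in ns */
	prctl(PR_SET_TIMERSLACK_PID, 0UL, tid);     /* back to default */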
index 7997a506ad4105fb145a9ddc120286a8431a1502..f7ffe36db03c4d8a1b9de218a8c402e24bd6e87f 100644 (file)
@@ -65,6 +65,7 @@
 #define SIOCDIFADDR    0x8936          /* delete PA address            */
 #define        SIOCSIFHWBROADCAST      0x8937  /* set hardware broadcast addr  */
 #define SIOCGIFCOUNT   0x8938          /* get number of devices */
+#define SIOCKILLADDR   0x8939          /* kill sockets with this local addr */
 
 #define SIOCGIFBR      0x8940          /* Bridging support             */
 #define SIOCSIFBR      0x8941          /* Set bridging options         */
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644 (file)
index 0000000..0baeb7d
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *     requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_GET_PROTOCOL
+ *     value:          0
+ *     index:          0
+ *     data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_STRING
+ *     value:          0
+ *     index:          string ID
+ *     data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_START
+ *     value:          0
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID_DEVICE
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          total length of the HID report descriptor
+ *     data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_HID_REPORT_DESC
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *     data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_HID_EVENT
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_AUDIO_MODE
+ *     value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
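
A device-side userspace sketch (the gadget's device node name is an
assumption): after the host has issued ACCESSORY_SEND_STRING and
ACCESSORY_START, the Android side reads the stored strings back:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/usb/f_accessory.h>

	static void print_accessory_host(void)
	{
		char manufacturer[256];
		int fd = open("/dev/usb_accessory", O_RDWR);

		if (fd >= 0 &&
		    ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER,
			  manufacturer) >= 0)
			printf("accessory host: %s\n", manufacturer);
	}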
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644 (file)
index 0000000..5032918
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+       /* file descriptor for file to transfer */
+       int                     fd;
+       /* offset in file for start of transfer */
+       loff_t          offset;
+       /* number of bytes to transfer */
+       int64_t         length;
+       /* MTP command ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint16_t        command;
+       /* MTP transaction ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint32_t        transaction_id;
+};
+
+struct mtp_event {
+       /* size of the event */
+       size_t          length;
+       /* event data to send */
+       void            *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
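
A userspace sketch of the file-transfer path (the device node and sizes
are assumptions; this mirrors how an MTP responder would hand a file to
the gadget driver):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/usb/f_mtp.h>

	static void send_file(int data_fd, int64_t file_size)
	{
		struct mtp_file_range mfr = {
			.fd = data_fd,       /* already-open source file */
			.offset = 0,
			.length = file_size, /* bytes to send */
		};
		int mtp = open("/dev/mtp_usb", O_RDWR);

		if (mtp >= 0 && ioctl(mtp, MTP_SEND_FILE, &mfr) < 0)
			perror("MTP_SEND_FILE");
	}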
diff --git a/include/uapi/video/adf.h b/include/uapi/video/adf.h
new file mode 100644 (file)
index 0000000..c5d2e62
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_VIDEO_ADF_H_
+#define _UAPI_VIDEO_ADF_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_mode.h>
+
+#define ADF_NAME_LEN 32
+#define ADF_MAX_CUSTOM_DATA_SIZE 4096
+
+enum adf_interface_type {
+       ADF_INTF_DSI = 0,
+       ADF_INTF_eDP = 1,
+       ADF_INTF_DPI = 2,
+       ADF_INTF_VGA = 3,
+       ADF_INTF_DVI = 4,
+       ADF_INTF_HDMI = 5,
+       ADF_INTF_MEMORY = 6,
+       ADF_INTF_TYPE_DEVICE_CUSTOM = 128,
+       ADF_INTF_TYPE_MAX = (~(__u32)0),
+};
+
+#define ADF_INTF_FLAG_PRIMARY (1 << 0)
+#define ADF_INTF_FLAG_EXTERNAL (1 << 1)
+
+enum adf_event_type {
+       ADF_EVENT_VSYNC = 0,
+       ADF_EVENT_HOTPLUG = 1,
+       ADF_EVENT_DEVICE_CUSTOM = 128,
+       ADF_EVENT_TYPE_MAX = 255,
+};
+
+/**
+ * struct adf_set_event - start or stop subscribing to ADF events
+ *
+ * @type: the type of event to (un)subscribe
+ * @enabled: subscribe or unsubscribe
+ *
+ * After subscribing to an event, userspace may poll() the ADF object's fd
+ * to wait for events or read() to consume the event's data.
+ *
+ * ADF reserves event types 0 to %ADF_EVENT_DEVICE_CUSTOM-1 for its own events.
+ * Devices may use event types %ADF_EVENT_DEVICE_CUSTOM to %ADF_EVENT_TYPE_MAX-1
+ * for driver-private events.
+ */
+struct adf_set_event {
+       __u8 type;
+       __u8 enabled;
+};
+
+/**
+ * struct adf_event - common header for ADF event data
+ *
+ * @type: event type
+ * @length: total size of event data, header inclusive
+ */
+struct adf_event {
+       __u8 type;
+       __u32 length;
+};
+
+/**
+ * struct adf_vsync_event - ADF vsync event
+ *
+ * @base: event header (see &struct adf_event)
+ * @timestamp: time of vsync event, in nanoseconds
+ */
+struct adf_vsync_event {
+       struct adf_event base;
+       __aligned_u64 timestamp;
+};
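
A userspace sketch of the subscribe-then-read flow described above
(intf_fd is an assumed, already-open ADF interface node):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <video/adf.h>

	static void wait_one_vsync(int intf_fd)
	{
		struct adf_set_event set = {
			.type = ADF_EVENT_VSYNC, .enabled = 1,
		};
		struct adf_vsync_event ev;

		if (ioctl(intf_fd, ADF_SET_EVENT, &set) == 0 &&
		    read(intf_fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev) &&
		    ev.base.type == ADF_EVENT_VSYNC)
			printf("vsync at %llu ns\n",
			       (unsigned long long)ev.timestamp);
	}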
+
+/**
+ * struct adf_hotplug_event - ADF display hotplug event
+ *
+ * @base: event header (see &struct adf_event)
+ * @connected: whether a display is now connected to the interface
+ */
+struct adf_hotplug_event {
+       struct adf_event base;
+       __u8 connected;
+};
+
+#define ADF_MAX_PLANES 4
+/**
+ * struct adf_buffer_config - description of buffer displayed by adf_post_config
+ *
+ * @overlay_engine: id of the target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @fd: dma_buf fd for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: stride (i.e. length of a scanline including padding) in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence fd which will clear when the buffer is
+ *     ready for display, or <0 if the buffer is already ready
+ */
+struct adf_buffer_config {
+       __u32 overlay_engine;
+
+       __u32 w;
+       __u32 h;
+       __u32 format;
+
+       __s32 fd[ADF_MAX_PLANES];
+       __u32 offset[ADF_MAX_PLANES];
+       __u32 pitch[ADF_MAX_PLANES];
+       __u8 n_planes;
+
+       __s32 acquire_fence;
+};
+#define ADF_MAX_BUFFERS (4096 / sizeof(struct adf_buffer_config))
+
+/**
+ * struct adf_post_config - request to flip to a new set of buffers
+ *
+ * @n_interfaces: number of interfaces targeted by the flip (input)
+ * @interfaces: ids of interfaces targeted by the flip (input)
+ * @n_bufs: number of buffers displayed (input)
+ * @bufs: description of buffers displayed (input)
+ * @custom_data_size: size of driver-private data (input)
+ * @custom_data: driver-private data (input)
+ * @complete_fence: sync_fence fd which will clear when this
+ *     configuration has left the screen (output)
+ */
+struct adf_post_config {
+       size_t n_interfaces;
+       __u32 __user *interfaces;
+
+       size_t n_bufs;
+       struct adf_buffer_config __user *bufs;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+
+       __s32 complete_fence;
+};
+#define ADF_MAX_INTERFACES (4096 / sizeof(__u32))
+
+/**
+ * struct adf_simple_buffer_allocate - request to allocate a "simple" buffer
+ *
+ * @w: width of buffer in pixels (input)
+ * @h: height of buffer in pixels (input)
+ * @format: DRM-style fourcc (input)
+ *
+ * @fd: dma_buf fd (output)
+ * @offset: location of first pixel, in bytes (output)
+ * @pitch: length of a scanline including padding, in bytes (output)
+ *
+ * Simple buffers are analogous to DRM's "dumb" buffers.  They have a single
+ * plane of linear RGB data which can be allocated and scanned out without
+ * any driver-private ioctls or data.
+ *
+ * @format must be a standard RGB format defined in drm_fourcc.h.
+ *
+ * ADF clients must NOT assume that an interface can scan out a simple buffer
+ * allocated by a different ADF interface, even if the two interfaces belong to
+ * the same ADF device.
+ */
+struct adf_simple_buffer_alloc {
+       __u16 w;
+       __u16 h;
+       __u32 format;
+
+       __s32 fd;
+       __u32 offset;
+       __u32 pitch;
+};
+
+/**
+ * struct adf_simple_post_config - request to flip to a single buffer without
+ * driver-private data
+ *
+ * @buf: description of buffer displayed (input)
+ * @complete_fence: sync_fence fd which will clear when this buffer has left the
+ * screen (output)
+ */
+struct adf_simple_post_config {
+       struct adf_buffer_config buf;
+       __s32 complete_fence;
+};
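
Putting the two "simple" structures together (a sketch only; eng_id and
intf_fd are assumptions, and which fd accepts each ioctl is up to the
ADF core):

	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <video/adf.h>

	static void post_simple_buffer(int intf_fd, __u32 eng_id)
	{
		struct adf_simple_buffer_alloc alloc = {
			.w = 1280, .h = 720, .format = DRM_FORMAT_XRGB8888,
		};
		struct adf_simple_post_config post = { { 0 } };

		if (ioctl(intf_fd, ADF_SIMPLE_BUFFER_ALLOC, &alloc) < 0)
			return;

		post.buf.overlay_engine = eng_id;
		post.buf.w = alloc.w;
		post.buf.h = alloc.h;
		post.buf.format = alloc.format;
		post.buf.fd[0] = alloc.fd;
		post.buf.offset[0] = alloc.offset;
		post.buf.pitch[0] = alloc.pitch;
		post.buf.n_planes = 1;
		post.buf.acquire_fence = -1; /* already ready for display */

		if (ioctl(intf_fd, ADF_SIMPLE_POST_CONFIG, &post) == 0)
			close(post.complete_fence); /* or keep, to wait on */
	}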
+
+/**
+ * struct adf_attachment_config - description of attachment between an overlay
+ * engine and an interface
+ *
+ * @overlay_engine: id of the overlay engine
+ * @interface: id of the interface
+ */
+struct adf_attachment_config {
+       __u32 overlay_engine;
+       __u32 interface;
+};
+
+/**
+ * struct adf_device_data - describes a display device
+ *
+ * @name: display device's name
+ * @n_attachments: the number of current attachments
+ * @attachments: list of current attachments
+ * @n_allowed_attachments: the number of allowed attachments
+ * @allowed_attachments: list of allowed attachments
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_device_data {
+       char name[ADF_NAME_LEN];
+
+       size_t n_attachments;
+       struct adf_attachment_config __user *attachments;
+
+       size_t n_allowed_attachments;
+       struct adf_attachment_config __user *allowed_attachments;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_ATTACHMENTS (4096 / sizeof(struct adf_attachment_config))
+
+/**
+ * struct adf_device_data - describes a display interface
+ *
+ * @name: display interface's name
+ * @type: interface type (see enum @adf_interface_type)
+ * @id: which interface of type @type;
+ *     e.g. interface DSI.1 -> @type=@ADF_INTF_DSI, @id=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @dpms_state: DPMS state (one of @DRM_MODE_DPMS_* defined in drm_mode.h)
+ * @hotplug_detect: whether a display is plugged in
+ * @width_mm: screen width in millimeters, or 0 if unknown
+ * @height_mm: screen height in millimeters, or 0 if unknown
+ * @current_mode: current display mode
+ * @n_available_modes: the number of hardware display modes
+ * @available_modes: list of hardware display modes
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_interface_data {
+       char name[ADF_NAME_LEN];
+
+       __u32 type;
+       __u32 id;
+       /* e.g. type=ADF_INTF_DSI, id=1 => DSI.1 */
+       __u32 flags;
+
+       __u8 dpms_state;
+       __u8 hotplug_detect;
+       __u16 width_mm;
+       __u16 height_mm;
+
+       struct drm_mode_modeinfo current_mode;
+       size_t n_available_modes;
+       struct drm_mode_modeinfo __user *available_modes;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_MODES (4096 / sizeof(struct drm_mode_modeinfo))
+
+/**
+ * struct adf_overlay_engine_data - describes an overlay engine
+ *
+ * @name: overlay engine's name
+ * @n_supported_formats: number of supported formats
+ * @supported_formats: list of supported formats
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_overlay_engine_data {
+       char name[ADF_NAME_LEN];
+
+       size_t n_supported_formats;
+       __u32 __user *supported_formats;
+
+       size_t custom_data_size;
+       void __user *custom_data;
+};
+#define ADF_MAX_SUPPORTED_FORMATS (4096 / sizeof(__u32))
+
+#define ADF_IOCTL_TYPE         'D'
+#define ADF_IOCTL_NR_CUSTOM    128
+
+#define ADF_SET_EVENT          _IOW(ADF_IOCTL_TYPE, 0, struct adf_set_event)
+#define ADF_BLANK              _IOW(ADF_IOCTL_TYPE, 1, __u8)
+#define ADF_POST_CONFIG                _IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config)
+#define ADF_SET_MODE           _IOW(ADF_IOCTL_TYPE, 3, \
+                                       struct drm_mode_modeinfo)
+#define ADF_GET_DEVICE_DATA    _IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data)
+#define ADF_GET_INTERFACE_DATA _IOR(ADF_IOCTL_TYPE, 5, \
+                                       struct adf_interface_data)
+#define ADF_GET_OVERLAY_ENGINE_DATA \
+                               _IOR(ADF_IOCTL_TYPE, 6, \
+                                       struct adf_overlay_engine_data)
+#define ADF_SIMPLE_POST_CONFIG _IOW(ADF_IOCTL_TYPE, 7, \
+                                       struct adf_simple_post_config)
+#define ADF_SIMPLE_BUFFER_ALLOC        _IOW(ADF_IOCTL_TYPE, 8, \
+                                       struct adf_simple_buffer_alloc)
+#define ADF_ATTACH             _IOW(ADF_IOCTL_TYPE, 9, \
+                                       struct adf_attachment_config)
+#define ADF_DETACH             _IOW(ADF_IOCTL_TYPE, 10, \
+                                       struct adf_attachment_config)
+
+#endif /* _UAPI_VIDEO_ADF_H_ */
diff --git a/include/video/adf.h b/include/video/adf.h
new file mode 100644 (file)
index 0000000..34f10e5
--- /dev/null
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_H
+#define _VIDEO_ADF_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/video/adf.h>
+#include "sync.h"
+
+struct adf_obj;
+struct adf_obj_ops;
+struct adf_device;
+struct adf_device_ops;
+struct adf_interface;
+struct adf_interface_ops;
+struct adf_overlay_engine;
+struct adf_overlay_engine_ops;
+
+/**
+ * struct adf_buffer - buffer displayed by adf_post
+ *
+ * @overlay_engine: target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @dma_bufs: dma_buf for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: length of a scanline including padding, in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence which will clear when the buffer is
+ *     ready for display
+ *
+ * &struct adf_buffer is the in-kernel counterpart to the userspace-facing
+ * &struct adf_buffer_config.
+ */
+struct adf_buffer {
+       struct adf_overlay_engine *overlay_engine;
+
+       u32 w;
+       u32 h;
+       u32 format;
+
+       struct dma_buf *dma_bufs[ADF_MAX_PLANES];
+       u32 offset[ADF_MAX_PLANES];
+       u32 pitch[ADF_MAX_PLANES];
+       u8 n_planes;
+
+       struct sync_fence *acquire_fence;
+};
+
+/**
+ * struct adf_buffer_mapping - state for mapping a &struct adf_buffer into the
+ * display device
+ *
+ * @attachments: dma-buf attachment for each plane
+ * @sg_tables: SG tables for each plane
+ */
+struct adf_buffer_mapping {
+       struct dma_buf_attachment *attachments[ADF_MAX_PLANES];
+       struct sg_table *sg_tables[ADF_MAX_PLANES];
+};
+
+/**
+ * struct adf_post - request to flip to a new set of buffers
+ *
+ * @n_bufs: number of buffers displayed
+ * @bufs: buffers displayed
+ * @mappings: in-device mapping state for each buffer
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ *
+ * &struct adf_post is the in-kernel counterpart to the userspace-facing
+ * &struct adf_post_config.
+ */
+struct adf_post {
+       size_t n_bufs;
+       struct adf_buffer *bufs;
+       struct adf_buffer_mapping *mappings;
+
+       size_t custom_data_size;
+       void *custom_data;
+};
+
+/**
+ * struct adf_attachment - description of attachment between an overlay engine
+ * and an interface
+ *
+ * @overlay_engine: the overlay engine
+ * @interface: the interface
+ *
+ * &struct adf_attachment is the in-kernel counterpart to the userspace-facing
+ * &struct adf_attachment_config.
+ */
+struct adf_attachment {
+       struct adf_overlay_engine *overlay_engine;
+       struct adf_interface *interface;
+};
+
+struct adf_pending_post {
+       struct list_head head;
+       struct adf_post config;
+       void *state;
+};
+
+enum adf_obj_type {
+       ADF_OBJ_OVERLAY_ENGINE = 0,
+       ADF_OBJ_INTERFACE = 1,
+       ADF_OBJ_DEVICE = 2,
+};
+
+/**
+ * struct adf_obj_ops - common ADF object implementation ops
+ *
+ * @open: handle opening the object's device node
+ * @release: handle releasing an open file
+ * @ioctl: handle custom ioctls
+ *
+ * @supports_event: return whether the object supports generating events of type
+ *     @type
+ * @set_event: enable or disable events of type @type
+ * @event_type_str: return a string representation of custom event @type
+ *     (@type >= %ADF_EVENT_DEVICE_CUSTOM).
+ *
+ * @custom_data: copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes of driver-private
+ *     data into @data (allocated by ADF) and return the number of copied bytes
+ *     in @size.  Return 0 on success or an error code (<0) on failure.
+ */
+struct adf_obj_ops {
+       /* optional */
+       int (*open)(struct adf_obj *obj, struct inode *inode,
+                       struct file *file);
+       /* optional */
+       void (*release)(struct adf_obj *obj, struct inode *inode,
+                       struct file *file);
+       /* optional */
+       long (*ioctl)(struct adf_obj *obj, unsigned int cmd, unsigned long arg);
+
+       /* optional */
+       bool (*supports_event)(struct adf_obj *obj, enum adf_event_type type);
+       /* required if supports_event is implemented */
+       void (*set_event)(struct adf_obj *obj, enum adf_event_type type,
+                       bool enabled);
+       /* optional */
+       const char *(*event_type_str)(struct adf_obj *obj,
+                       enum adf_event_type type);
+
+       /* optional */
+       int (*custom_data)(struct adf_obj *obj, void *data, size_t *size);
+};
+
+struct adf_obj {
+       enum adf_obj_type type;
+       char name[ADF_NAME_LEN];
+       struct adf_device *parent;
+
+       const struct adf_obj_ops *ops;
+
+       struct device dev;
+
+       struct spinlock file_lock;
+       struct list_head file_list;
+
+       struct mutex event_lock;
+       struct rb_root event_refcount;
+
+       int id;
+       int minor;
+};
+
+/**
+ * struct adf_device_quirks - common display device quirks
+ *
+ * @buffer_padding: whether the last scanline of a buffer extends to the
+ *     buffer's pitch (@ADF_BUFFER_PADDED_TO_PITCH) or just to the visible
+ *     width (@ADF_BUFFER_UNPADDED)
+ */
+struct adf_device_quirks {
+       /* optional, defaults to ADF_BUFFER_PADDED_TO_PITCH */
+       enum {
+               ADF_BUFFER_PADDED_TO_PITCH = 0,
+               ADF_BUFFER_UNPADDED = 1,
+       } buffer_padding;
+};
+
+/**
+ * struct adf_device_ops - display device implementation ops
+ *
+ * @owner: device's module
+ * @base: common operations (see &struct adf_obj_ops)
+ * @quirks: device's quirks (see &struct adf_device_quirks)
+ *
+ * @attach: attach overlay engine @eng to interface @intf.  Return 0 on success
+ *     or error code (<0) on failure.
+ * @detach: detach overlay engine @eng from interface @intf.  Return 0 on
+ *     success or error code (<0) on failure.
+ *
+ * @validate_custom_format: validate the number and size of planes
+ *     in buffers with a custom format (i.e., not one of the @DRM_FORMAT_*
+ *     types defined in drm/drm_fourcc.h).  Return 0 if the buffer is valid or
+ *     an error code (<0) otherwise.
+ *
+ * @validate: validate that the proposed configuration @cfg is legal.  The
+ *     driver may optionally allocate and return some driver-private state in
+ *     @driver_state, which will be passed to the corresponding post().  The
+ *     driver may NOT commit any changes to hardware.  Return 0 if @cfg is
+ *     valid or an error code (<0) otherwise.
+ * @complete_fence: create a hardware-backed sync fence to be signaled when
+ *     @cfg is removed from the screen.  If unimplemented, ADF automatically
+ *     creates an sw_sync fence.  Return the sync fence on success or a
+ *     PTR_ERR() on failure.
+ * @post: flip @cfg onto the screen.  Wait for the display to begin scanning out
+ *     @cfg before returning.
+ * @advance_timeline: signal the sync fence for the last configuration to leave
+ *     the display.  If unimplemented, ADF automatically advances an sw_sync
+ *     timeline.
+ * @state_free: free driver-private state allocated during validate()
+ */
+struct adf_device_ops {
+       /* required */
+       struct module *owner;
+       const struct adf_obj_ops base;
+       /* optional */
+       const struct adf_device_quirks quirks;
+
+       /* optional */
+       int (*attach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+                       struct adf_interface *intf);
+       /* optional */
+       int (*detach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+                       struct adf_interface *intf);
+
+       /* required if any of the device's overlay engines supports at least one
+          custom format */
+       int (*validate_custom_format)(struct adf_device *dev,
+                       struct adf_buffer *buf);
+
+       /* required */
+       int (*validate)(struct adf_device *dev, struct adf_post *cfg,
+                       void **driver_state);
+       /* optional */
+       struct sync_fence *(*complete_fence)(struct adf_device *dev,
+                       struct adf_post *cfg, void *driver_state);
+       /* required */
+       void (*post)(struct adf_device *dev, struct adf_post *cfg,
+                       void *driver_state);
+       /* required if complete_fence is implemented */
+       void (*advance_timeline)(struct adf_device *dev,
+                       struct adf_post *cfg, void *driver_state);
+       /* required if validate allocates driver state */
+       void (*state_free)(struct adf_device *dev, void *driver_state);
+};
+
+struct adf_attachment_list {
+       struct adf_attachment attachment;
+       struct list_head head;
+};
+
+struct adf_device {
+       struct adf_obj base;
+       struct device *dev;
+
+       const struct adf_device_ops *ops;
+
+       struct mutex client_lock;
+
+       struct idr interfaces;
+       size_t n_interfaces;
+       struct idr overlay_engines;
+
+       struct list_head post_list;
+       struct mutex post_lock;
+       struct kthread_worker post_worker;
+       struct task_struct *post_thread;
+       struct kthread_work post_work;
+
+       struct list_head attached;
+       size_t n_attached;
+       struct list_head attach_allowed;
+       size_t n_attach_allowed;
+
+       struct adf_pending_post *onscreen;
+
+       struct sw_sync_timeline *timeline;
+       int timeline_max;
+};
+
+/**
+ * struct adf_interface_ops - display interface implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @blank: change the display's DPMS state.  Return 0 on success or error
+ *     code (<0) on failure.
+ *
+ * @alloc_simple_buffer: allocate a buffer with the specified @w, @h, and
+ *     @format.  @format will be a standard RGB format (i.e.,
+ *     adf_format_is_rgb(@format) == true).  Return 0 on success or error code
+ *     (<0) on failure.  On success, return the buffer, offset, and pitch in
+ *     @dma_buf, @offset, and @pitch respectively.
+ * @describe_simple_post: provide driver-private data needed to post a single
+ *     buffer @buf.  Copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes into @data
+ *     (allocated by ADF) and return the number of bytes in @size.  Return 0 on
+ *     success or error code (<0) on failure.
+ *
+ * @modeset: change the interface's mode.  @mode is not necessarily part of the
+ *     modelist passed to adf_hotplug_notify_connected(); the driver may
+ *     accept or reject custom modes at its discretion.  Return 0 on success or
+ *     error code (<0) if the mode could not be set.
+ *
+ * @screen_size: copy the screen dimensions in millimeters into @width_mm
+ *     and @height_mm.  Return 0 on success or error code (<0) if the display
+ *     dimensions are unknown.
+ *
+ * @type_str: return a string representation of custom @intf->type
+ *     (@intf->type >= @ADF_INTF_TYPE_DEVICE_CUSTOM).
+ */
+struct adf_interface_ops {
+       const struct adf_obj_ops base;
+
+       /* optional */
+       int (*blank)(struct adf_interface *intf, u8 state);
+
+       /* optional */
+       int (*alloc_simple_buffer)(struct adf_interface *intf,
+                       u16 w, u16 h, u32 format,
+                       struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+       /* optional */
+       int (*describe_simple_post)(struct adf_interface *intf,
+                       struct adf_buffer *fb, void *data, size_t *size);
+
+       /* optional */
+       int (*modeset)(struct adf_interface *intf,
+                       struct drm_mode_modeinfo *mode);
+
+       /* optional */
+       int (*screen_size)(struct adf_interface *intf, u16 *width_mm,
+                       u16 *height_mm);
+
+       /* optional */
+       const char *(*type_str)(struct adf_interface *intf);
+};
+
+struct adf_interface {
+       struct adf_obj base;
+       const struct adf_interface_ops *ops;
+
+       struct drm_mode_modeinfo current_mode;
+
+       enum adf_interface_type type;
+       u32 idx;
+       u32 flags;
+
+       wait_queue_head_t vsync_wait;
+       ktime_t vsync_timestamp;
+       rwlock_t vsync_lock;
+
+       u8 dpms_state;
+
+       bool hotplug_detect;
+       struct drm_mode_modeinfo *modelist;
+       size_t n_modes;
+       rwlock_t hotplug_modelist_lock;
+};
+
+/**
+ * struct adf_overlay_engine_ops - overlay engine implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @supported_formats: list of fourccs the overlay engine can scan out
+ * @n_supported_formats: length of @supported_formats, up to
+ *     %ADF_MAX_SUPPORTED_FORMATS
+ */
+struct adf_overlay_engine_ops {
+       const struct adf_obj_ops base;
+
+       /* required */
+       const u32 *supported_formats;
+       /* required */
+       const size_t n_supported_formats;
+};
+
+struct adf_overlay_engine {
+       struct adf_obj base;
+
+       const struct adf_overlay_engine_ops *ops;
+};
+
+#define adf_obj_to_device(ptr) \
+       container_of((ptr), struct adf_device, base)
+
+#define adf_obj_to_interface(ptr) \
+       container_of((ptr), struct adf_interface, base)
+
+#define adf_obj_to_overlay_engine(ptr) \
+       container_of((ptr), struct adf_overlay_engine, base)
+
+int __printf(4, 5) adf_device_init(struct adf_device *dev,
+               struct device *parent, const struct adf_device_ops *ops,
+               const char *fmt, ...);
+void adf_device_destroy(struct adf_device *dev);
+int __printf(7, 8) adf_interface_init(struct adf_interface *intf,
+               struct adf_device *dev, enum adf_interface_type type, u32 idx,
+               u32 flags, const struct adf_interface_ops *ops, const char *fmt,
+               ...);
+void adf_interface_destroy(struct adf_interface *intf);
+static inline struct adf_device *adf_interface_parent(
+               struct adf_interface *intf)
+{
+       return intf->base.parent;
+}
+int __printf(4, 5) adf_overlay_engine_init(struct adf_overlay_engine *eng,
+               struct adf_device *dev,
+               const struct adf_overlay_engine_ops *ops, const char *fmt, ...);
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng);
+static inline struct adf_device *adf_overlay_engine_parent(
+               struct adf_overlay_engine *eng)
+{
+       return eng->base.parent;
+}
+
+int adf_attachment_allow(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+
+const char *adf_obj_type_str(enum adf_obj_type type);
+const char *adf_interface_type_str(struct adf_interface *intf);
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type);
+
+#define ADF_FORMAT_STR_SIZE 5
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE]);
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+               u8 num_planes, u8 hsub, u8 vsub, u8 cpp[]);
+/**
+ * adf_format_validate_rgb - validate the number and size of planes in buffers
+ * with a custom RGB format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @cpp: expected bytes per pixel
+ *
+ * adf_format_validate_rgb() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.  @buf must have a single RGB plane.
+ *
+ * Returns 0 if @buf has a single plane with sufficient size, or -EINVAL
+ * otherwise.
+ */
+static inline int adf_format_validate_rgb(struct adf_device *dev,
+               struct adf_buffer *buf, u8 cpp)
+{
+       return adf_format_validate_yuv(dev, buf, 1, 1, 1, &cpp);
+}
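/*
 * Editor's sketch (not part of the patch): using the helper above from a
 * driver's validate_custom_format() op.  The single-plane, 3-bytes-per-pixel
 * packed RGB layout is an assumption for illustration.
 */
static int sketch_validate_custom_format(struct adf_device *dev,
		struct adf_buffer *buf)
{
	/* hypothetical packed 24-bit RGB format: one plane, 3 bytes/pixel */
	return adf_format_validate_rgb(dev, buf, 3);
}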
+
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event);
+
+static inline void adf_vsync_get(struct adf_interface *intf)
+{
+       adf_event_get(&intf->base, ADF_EVENT_VSYNC);
+}
+
+static inline void adf_vsync_put(struct adf_interface *intf)
+{
+       adf_event_put(&intf->base, ADF_EVENT_VSYNC);
+}
+
+int adf_vsync_wait(struct adf_interface *intf, long timeout);
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp);
+
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes);
+void adf_hotplug_notify_disconnected(struct adf_interface *intf);
+
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode);
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode);
+
+#endif /* _VIDEO_ADF_H */
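/*
 * Editor's sketch (not part of the patch): the minimal required ops wired
 * into adf_device_init() from a platform driver's probe.  sketch_hw_commit()
 * is an assumption standing in for the real hardware flip.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <video/adf.h>

static struct adf_device sketch_adf_dev;

static int sketch_validate(struct adf_device *dev, struct adf_post *cfg,
		void **driver_state)
{
	/* a real driver checks cfg against hw limits; no hw writes allowed */
	return 0;
}

static void sketch_post(struct adf_device *dev, struct adf_post *cfg,
		void *driver_state)
{
	sketch_hw_commit(cfg);	/* assumed: flips and waits for scanout */
}

static const struct adf_device_ops sketch_dev_ops = {
	.owner		= THIS_MODULE,		/* required */
	.validate	= sketch_validate,	/* required */
	.post		= sketch_post,		/* required */
};

static int sketch_probe(struct platform_device *pdev)
{
	return adf_device_init(&sketch_adf_dev, &pdev->dev, &sketch_dev_ops,
			"sketch.%d", 0);
}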
diff --git a/include/video/adf_client.h b/include/video/adf_client.h
new file mode 100644 (file)
index 0000000..983f2b6
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_CLIENT_H_
+#define _VIDEO_ADF_CLIENT_H_
+
+#include <video/adf.h>
+
+int adf_interface_blank(struct adf_interface *intf, u8 state);
+u8 adf_interface_dpms_state(struct adf_interface *intf);
+
+void adf_interface_current_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode);
+size_t adf_interface_modelist(struct adf_interface *intf,
+               struct drm_mode_modeinfo *modelist, size_t n_modes);
+int adf_interface_set_mode(struct adf_interface *intf,
+               struct drm_mode_modeinfo *mode);
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width,
+               u16 *height);
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+               u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+               struct adf_buffer *buf);
+
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+               u32 format);
+
+size_t adf_device_attachments(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments);
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+               struct adf_attachment *attachments, size_t n_attachments);
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+bool adf_device_attach_allowed(struct adf_device *dev,
+               struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+               struct adf_interface *intf);
+
+struct sync_fence *adf_device_post(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size);
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+               struct adf_interface **intfs, size_t n_intfs,
+               struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+               size_t custom_data_size);
+
+#endif /* _VIDEO_ADF_CLIENT_H_ */
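/*
 * Editor's sketch (not part of the patch): a client flipping one buffer to
 * one interface through the API above.  Filling in struct adf_buffer is
 * device-specific and omitted.
 */
static struct sync_fence *sketch_flip(struct adf_device *dev,
		struct adf_interface *intf, struct adf_buffer *buf)
{
	/* one interface, one buffer, no driver-private data */
	struct sync_fence *release = adf_device_post(dev, &intf, 1, buf, 1,
			NULL, 0);

	/* on success, buf's memory must stay untouched until release signals */
	return release;
}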
diff --git a/include/video/adf_fbdev.h b/include/video/adf_fbdev.h
new file mode 100644 (file)
index 0000000..b722c6b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FBDEV_H_
+#define _VIDEO_ADF_FBDEV_H_
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <video/adf.h>
+
+struct adf_fbdev {
+       struct adf_interface *intf;
+       struct adf_overlay_engine *eng;
+       struct fb_info *info;
+       u32 pseudo_palette[16];
+
+       unsigned int refcount;
+       struct mutex refcount_lock;
+
+       struct dma_buf *dma_buf;
+       u32 offset;
+       u32 pitch;
+       void *vaddr;
+       u32 format;
+
+       u16 default_xres_virtual;
+       u16 default_yres_virtual;
+       u32 default_format;
+};
+
+#if IS_ENABLED(CONFIG_ADF_FBDEV)
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode);
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode);
+
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...);
+void adf_fbdev_destroy(struct adf_fbdev *fbdev);
+
+int adf_fbdev_open(struct fb_info *info, int user);
+int adf_fbdev_release(struct fb_info *info, int user);
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_set_par(struct fb_info *info);
+int adf_fbdev_blank(int blank, struct fb_info *info);
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
+#else
+static inline void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+               struct fb_videomode *vmode)
+{
+       WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+               struct drm_mode_modeinfo *mode)
+{
+       WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline int adf_fbdev_init(struct adf_fbdev *fbdev,
+               struct adf_interface *interface,
+               struct adf_overlay_engine *eng,
+               u16 xres_virtual, u16 yres_virtual, u32 format,
+               struct fb_ops *fbops, const char *fmt, ...)
+{
+       return -ENODEV;
+}
+
+static inline void adf_fbdev_destroy(struct adf_fbdev *fbdev) { }
+
+static inline int adf_fbdev_open(struct fb_info *info, int user)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_release(struct fb_info *info, int user)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_check_var(struct fb_var_screeninfo *var,
+               struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_set_par(struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_pan_display(struct fb_var_screeninfo *var,
+               struct fb_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int adf_fbdev_mmap(struct fb_info *info,
+               struct vm_area_struct *vma)
+{
+       return -ENODEV;
+}
+#endif
+
+#endif /* _VIDEO_ADF_FBDEV_H_ */
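/*
 * Editor's sketch (not part of the patch): plugging the helpers above into
 * an fb_ops table and registering the emulated fbdev.  The geometry and the
 * DRM_FORMAT_RGBX8888 format are assumptions.
 */
static struct fb_ops sketch_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_open	= adf_fbdev_open,
	.fb_release	= adf_fbdev_release,
	.fb_check_var	= adf_fbdev_check_var,
	.fb_set_par	= adf_fbdev_set_par,
	.fb_blank	= adf_fbdev_blank,
	.fb_pan_display	= adf_fbdev_pan_display,
	.fb_mmap	= adf_fbdev_mmap,
};

static int sketch_register_fbdev(struct adf_fbdev *fbdev,
		struct adf_interface *intf, struct adf_overlay_engine *eng)
{
	return adf_fbdev_init(fbdev, intf, eng, 1080, 1920,
			DRM_FORMAT_RGBX8888, &sketch_fb_ops, "sketch-fb");
}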
diff --git a/include/video/adf_format.h b/include/video/adf_format.h
new file mode 100644 (file)
index 0000000..e03182c
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FORMAT_H
+#define _VIDEO_ADF_FORMAT_H
+
+bool adf_format_is_standard(u32 format);
+bool adf_format_is_rgb(u32 format);
+u8 adf_format_num_planes(u32 format);
+u8 adf_format_bpp(u32 format);
+u8 adf_format_plane_cpp(u32 format, int plane);
+u8 adf_format_horz_chroma_subsampling(u32 format);
+u8 adf_format_vert_chroma_subsampling(u32 format);
+
+#endif /* _VIDEO_ADF_FORMAT_H */
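/*
 * Editor's sketch (not part of the patch): sizing one plane of a buffer
 * with the queries above, mirroring what ADF's own validation does.
 */
static size_t sketch_plane_size(u32 format, u8 plane, u32 height, u32 pitch)
{
	/* only the chroma planes (plane > 0) are vertically subsampled */
	u8 vsub = plane ? adf_format_vert_chroma_subsampling(format) : 1;

	return (size_t)pitch * (height / vsub);
}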
diff --git a/include/video/adf_memblock.h b/include/video/adf_memblock.h
new file mode 100644 (file)
index 0000000..6256e0e
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_MEMBLOCK_H_
+#define _VIDEO_ADF_MEMBLOCK_H_
+
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags);
+
+#endif /* _VIDEO_ADF_MEMBLOCK_H_ */
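/*
 * Editor's sketch (not part of the patch): exporting a bootloader-reserved
 * framebuffer as a dma-buf.  fb_base/fb_size are assumptions for a region
 * set aside with memblock_reserve() at early boot; flags are forwarded to
 * the dma-buf core.
 */
static struct dma_buf *sketch_export_bootfb(phys_addr_t fb_base,
		size_t fb_size)
{
	struct dma_buf *buf = adf_memblock_export(fb_base, fb_size, 0);

	return IS_ERR(buf) ? NULL : buf;
}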
index 5d6febaea56d647d91fcd6b1f737be76ceb7a2a8..051ac0c6b16f1acef6d9c9b224858bb10f2625e6 100644 (file)
@@ -1251,6 +1251,12 @@ config HOTPLUG
 config HAVE_PCSPKR_PLATFORM
        bool
 
+config PANIC_TIMEOUT
+       int "Default panic timeout"
+       default 0
+       help
+         Set the default panic timeout in seconds: how long the kernel
+         waits after a panic before rebooting.  A value of 0 means wait
+         forever.
+
 menuconfig EXPERT
        bool "Configure standard kernel features (expert users)"
        # Unhide debug options, to make the on-by-default options visible
index d0def7fc2848dd899281b2bb2527f05fa79ec7a9..cd1c303214f32c20672538283d1e053323abb65c 100644 (file)
@@ -2106,6 +2106,24 @@ out_free_group_list:
        return retval;
 }
 
+/*
+ * Give each subsystem on @cgrp a chance to extend the permission check:
+ * every subsystem must implement ->allow_attach and approve @tset,
+ * otherwise the attach is denied with -EACCES.
+ */
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       struct cgroup_subsys *ss;
+       int ret;
+
+       for_each_subsys(cgrp->root, ss) {
+               if (ss->allow_attach) {
+                       ret = ss->allow_attach(cgrp, tset);
+                       if (ret)
+                               return ret;
+               } else {
+                       return -EACCES;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Find the task_struct of the task to attach by vpid and pass it along to the
  * function to attach either it or all tasks in its threadgroup. Will lock
@@ -2137,9 +2155,18 @@ retry_find_task:
                if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                    !uid_eq(cred->euid, tcred->uid) &&
                    !uid_eq(cred->euid, tcred->suid)) {
-                       rcu_read_unlock();
-                       ret = -EACCES;
-                       goto out_unlock_cgroup;
+                       /*
+                        * if the default permission check fails, give each
+                        * cgroup a chance to extend the permission check
+                        */
+                       struct cgroup_taskset tset = { };
+                       tset.single.task = tsk;
+                       tset.single.cgrp = cgrp;
+                       ret = cgroup_allow_attach(cgrp, &tset);
+                       if (ret) {
+                               rcu_read_unlock();
+                               goto out_unlock_cgroup;
+                       }
                }
        } else
                tsk = current;
index bc255e25d5ddced3bcda1cca7f2d61ba3e7c6cac..7d4755634d32c38b55e86944283a49bdebf5ec97 100644 (file)
@@ -728,3 +728,23 @@ void init_cpu_online(const struct cpumask *src)
 {
        cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+       atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+       atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
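/*
 * Editor's sketch (not part of the patch): a driver consuming the chain
 * added above.  Interpreting val as idle entry vs. exit follows the x86
 * IDLE_START/IDLE_END convention and is an assumption here.
 */
static int sketch_idle_notify(struct notifier_block *nb, unsigned long val,
		void *unused)
{
	/* e.g. drop a performance vote on idle entry, restore it on exit */
	return NOTIFY_OK;
}

static struct notifier_block sketch_idle_nb = {
	.notifier_call = sketch_idle_notify,
};

static int __init sketch_idle_init(void)
{
	idle_notifier_register(&sketch_idle_nb);
	return 0;
}
late_initcall(sketch_idle_init);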
index 0506d447aed270867214c9da188b4ac8a22ebecf..2d4438b14b4230e9bd19fea15f2bd13f39bda81f 100644 (file)
@@ -86,6 +86,10 @@ static int kgdb_use_con;
 bool dbg_is_early = true;
 /* Next cpu to become the master debug core */
 int dbg_switch_cpu;
+/* Flag for entering kdb when a panic occurs */
+static bool break_on_panic = true;
+/* Flag for entering kdb when an exception occurs */
+static bool break_on_exception = true;
 
 /* Use kdb or gdbserver mode */
 int dbg_kdb_mode = 1;
@@ -100,6 +104,8 @@ early_param("kgdbcon", opt_kgdb_con);
 
 module_param(kgdb_use_con, int, 0644);
 module_param(kgdbreboot, int, 0644);
+module_param(break_on_panic, bool, 0644);
+module_param(break_on_exception, bool, 0644);
 
 /*
  * Holds information about breakpoints in a kernel. These breakpoints are
@@ -678,6 +684,9 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(0);
 
+       if (unlikely(signo != SIGTRAP && !break_on_exception))
+               return 1;
+
        ks->cpu                 = raw_smp_processor_id();
        ks->ex_vector           = evector;
        ks->signo               = signo;
@@ -784,6 +793,9 @@ static int kgdb_panic_event(struct notifier_block *self,
                            unsigned long val,
                            void *data)
 {
+       if (!break_on_panic)
+               return NOTIFY_DONE;
+
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", (char *)data);
        kgdb_breakpoint();
index 14ff4849262c0c6ecb275ba30da19724819b548c..4b0fb2fb779c84c6bce7a8e64cb1af5e77b2b9c9 100644 (file)
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
        int i;
        int diag, dtab_count;
        int key;
-
+       static int last_crlf;
 
        diag = kdbgetintenv("DTABCOUNT", &dtab_count);
        if (diag)
@@ -237,6 +237,9 @@ poll_again:
                return buffer;
        if (key != 9)
                tab = 0;
+       if (key != 10 && key != 13)
+               last_crlf = 0;
+
        switch (key) {
        case 8: /* backspace */
                if (cp > buffer) {
@@ -254,7 +257,12 @@ poll_again:
                        *cp = tmp;
                }
                break;
-       case 13: /* enter */
+       case 10: /* new line */
+       case 13: /* carriage return */
+               /* handle \n after \r */
+               if (last_crlf && last_crlf != key)
+                       break;
+               last_crlf = key;
                *lastchar++ = '\n';
                *lastchar++ = '\0';
                if (!KDB_STATE(KGDB_TRANS)) {
index 6682b2ea5b11aa3680fe4ccdf62e8d384ddc78b0..89cca291f86388ca51d7704cc994f3a1c2a37abf 100644 (file)
@@ -840,7 +840,7 @@ void do_exit(long code)
        /*
         * Make sure we are holding no locks:
         */
-       debug_check_no_locks_held(tsk);
+       debug_check_no_locks_held();
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
index 814363a69b80142822e20990eced1c7f1fe20215..a0fbe5277226bbe679d03cc8efea6c3bf145f46b 100644 (file)
@@ -198,6 +198,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+/* Notifier list called when a task struct is freed */
+static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
+
 static void account_kernel_stack(struct thread_info *ti, int account)
 {
        struct zone *zone = page_zone(virt_to_page(ti));
@@ -231,6 +234,18 @@ static inline void put_signal_struct(struct signal_struct *sig)
                free_signal_struct(sig);
 }
 
+int task_free_register(struct notifier_block *n)
+{
+       return atomic_notifier_chain_register(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_register);
+
+int task_free_unregister(struct notifier_block *n)
+{
+       return atomic_notifier_chain_unregister(&task_free_notifier, n);
+}
+EXPORT_SYMBOL(task_free_unregister);
+
 void __put_task_struct(struct task_struct *tsk)
 {
        WARN_ON(!tsk->exit_state);
@@ -242,6 +257,7 @@ void __put_task_struct(struct task_struct *tsk)
        delayacct_tsk_free(tsk);
        put_signal_struct(tsk->signal);
 
+       atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
        if (!profile_handoff_task(tsk))
                free_task(tsk);
 }
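/*
 * Editor's sketch (not part of the patch): a consumer of the task-free
 * notifier added above.  The chain runs from __put_task_struct() with the
 * dying task as the data pointer, so callbacks must not take new references.
 */
static int sketch_task_free(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct task_struct *tsk = data;

	pr_debug("task %s is being freed\n", tsk->comm);
	return NOTIFY_OK;
}

static struct notifier_block sketch_task_free_nb = {
	.notifier_call = sketch_task_free,
};
/* registered once at init: task_free_register(&sketch_task_free_nb); */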
@@ -697,7 +713,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 
        mm = get_task_mm(task);
        if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, mode)) {
+                       !ptrace_may_access(task, mode) &&
+                       !capable(CAP_SYS_RESOURCE)) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
index 78758512b1e1ef6994bb91371e5bbe5e8a43e686..5420f635111f4b0b9d28760276424e6aa39a7b21 100644 (file)
@@ -116,6 +116,18 @@ bool freeze_task(struct task_struct *p)
 {
        unsigned long flags;
 
+       /*
+        * This check can race with freezer_do_not_count, but worst case that
+        * will result in an extra wakeup being sent to the task.  It does not
+        * race with freezer_count(), the barriers in freezer_count() and
+        * freezer_should_skip() ensure that either freezer_count() sees
+        * freezing == true in try_to_freeze() and freezes, or
+        * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
+        * normally.
+        */
+       if (freezer_should_skip(p))
+               return false;
+
        spin_lock_irqsave(&freezer_lock, flags);
        if (!freezing(p) || frozen(p)) {
                spin_unlock_irqrestore(&freezer_lock, flags);
index 625a4e659e7a8bf58d4cdbb6108e22218456f821..ad971d0f0be0c5fab77c5440c0bb464fa41a0bbf 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/ptrace.h>
 #include <linux/sched/rt.h>
 #include <linux/hugetlb.h>
+#include <linux/freezer.h>
 
 #include <asm/futex.h>
 
@@ -1935,7 +1936,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
                 * is no timeout, or if it has yet to expire.
                 */
                if (!timeout || timeout->task)
-                       schedule();
+                       freezable_schedule();
        }
        __set_current_state(TASK_RUNNING);
 }
index aadf4b7a607c8bfb8a032b12fb067446628f90c4..1e84be107436a3969fbf61569cebf921e4e02e4c 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -1565,7 +1566,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
                        t->task = NULL;
 
                if (likely(t->task))
-                       schedule();
+                       freezable_schedule();
 
                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;
index abcd6ca86cb76b56e5979613a1964c0db743b5d0..c72b7a43beb9428d3c134584e394db9252569cd2 100644 (file)
@@ -103,14 +103,14 @@ int check_wakeup_irqs(void)
        int irq;
 
        for_each_irq_desc(irq, desc) {
-               /*
-                * Only interrupts which are marked as wakeup source
-                * and have not been disabled before the suspend check
-                * can abort suspend.
-                */
                if (irqd_is_wakeup_set(&desc->irq_data)) {
-                       if (desc->depth == 1 && desc->istate & IRQS_PENDING)
+                       if (desc->istate & IRQS_PENDING) {
+                               pr_info("Wakeup IRQ %d %s pending, suspend aborted\n",
+                                       irq,
+                                       desc->action && desc->action->name ?
+                                       desc->action->name : "");
                                return -EBUSY;
+                       }
                        continue;
                }
                /*
index 1f3186b37fd5390be1534f895eb413de19e0a6d7..e16c45b9ee77054f80becd37a51da00776f796c1 100644 (file)
@@ -4090,7 +4090,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
        if (!debug_locks_off())
                return;
@@ -4099,22 +4099,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
        printk("\n");
        printk("=====================================\n");
-       printk("[ BUG: lock held at task exit time! ]\n");
+       printk("[ BUG: %s/%d still has locks held! ]\n",
+              current->comm, task_pid_nr(current));
        print_kernel_ident();
        printk("-------------------------------------\n");
-       printk("%s/%d is exiting with locks still held!\n",
-               curr->comm, task_pid_nr(curr));
-       lockdep_print_held_locks(curr);
-
+       lockdep_print_held_locks(current);
        printk("\nstack backtrace:\n");
        dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-       if (unlikely(task->lockdep_depth > 0))
-               print_held_locks_bug(task);
+       if (unlikely(current->lockdep_depth > 0))
+               print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
index 167ec097ce8b3851d8f78c2ac7ef2614c840d517..126b2ef2eb618bcd39f827ab43a4162092f70284 100644 (file)
 #define PANIC_TIMER_STEP 100
 #define PANIC_BLINK_SPD 18
 
+/* Machine specific panic information string */
+char *mach_panic_string;
+
 int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
 static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
-int panic_timeout;
+#ifndef CONFIG_PANIC_TIMEOUT
+#define CONFIG_PANIC_TIMEOUT 0
+#endif
+int panic_timeout = CONFIG_PANIC_TIMEOUT;
 EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -375,6 +381,11 @@ late_initcall(init_oops_id);
 void print_oops_end_marker(void)
 {
        init_oops_id();
+
+       if (mach_panic_string)
+               printk(KERN_WARNING "Board Information: %s\n",
+                      mach_panic_string);
+
        printk(KERN_WARNING "---[ end trace %016llx ]---\n",
                (unsigned long long)oops_id);
 }
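/*
 * Editor's sketch (not part of the patch): a board file publishing a
 * hardware revision string so it is printed with every oops end marker.
 * The string itself is an assumption.
 */
extern char *mach_panic_string;

static int __init sketch_board_panic_init(void)
{
	mach_panic_string = "sketch board, rev A";
	return 0;
}
early_initcall(sketch_board_panic_init);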
index 46455961a88fb75c3ce4e426c4f9b8ff022ecbcc..7a297aeeca9ede3c2c400e5bedb264c24b2df2b1 100644 (file)
@@ -18,6 +18,14 @@ config SUSPEND_FREEZER
 
          Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HAS_WAKELOCK
+       bool
+       default y
+
+config WAKELOCK
+       bool
+       default y
+
 config HIBERNATE_CALLBACKS
        bool
 
@@ -294,3 +302,10 @@ config PM_GENERIC_DOMAINS_RUNTIME
 config CPU_PM
        bool
        depends on SUSPEND || CPU_IDLE
+
+config SUSPEND_TIME
+       bool "Log time spent in suspend"
+       ---help---
+         Prints the time spent in suspend in the kernel log, and
+         keeps statistics on the time spent in suspend in
+         /sys/kernel/debug/suspend_time
index 29472bff11ef9b1c902e71a7ae9823a9e36c06e6..74c713ba61b0708c37fa927c69f145b0b5458dd4 100644 (file)
@@ -11,5 +11,8 @@ obj-$(CONFIG_HIBERNATION)     += hibernate.o snapshot.o swap.o user.o \
                                   block_io.o
 obj-$(CONFIG_PM_AUTOSLEEP)     += autosleep.o
 obj-$(CONFIG_PM_WAKELOCKS)     += wakelock.o
+obj-$(CONFIG_SUSPEND_TIME)     += suspend_time.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)      += poweroff.o
+
+obj-$(CONFIG_SUSPEND)  += wakeup_reason.o
index 1b212bee1510cb22b6e1df1c2ba00fff6b6cf4e6..4ac9ce12679a92a01b55856f401bad2261f09072 100644 (file)
@@ -30,9 +30,10 @@ static int try_to_freeze_tasks(bool user_only)
        unsigned int todo;
        bool wq_busy = false;
        struct timeval start, end;
-       u64 elapsed_csecs64;
-       unsigned int elapsed_csecs;
+       u64 elapsed_msecs64;
+       unsigned int elapsed_msecs;
        bool wakeup = false;
+       int sleep_usecs = USEC_PER_MSEC;
 
        do_gettimeofday(&start);
 
@@ -68,22 +69,25 @@ static int try_to_freeze_tasks(bool user_only)
 
                /*
                 * We need to retry, but first give the freezing tasks some
-                * time to enter the refrigerator.
+                * time to enter the refrigerator.  Start with an initial
+                * 1 ms sleep followed by exponential backoff until 8 ms.
                 */
-               msleep(10);
+               usleep_range(sleep_usecs / 2, sleep_usecs);
+               if (sleep_usecs < 8 * USEC_PER_MSEC)
+                       sleep_usecs *= 2;
        }
 
        do_gettimeofday(&end);
-       elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
-       do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
-       elapsed_csecs = elapsed_csecs64;
+       elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+       do_div(elapsed_msecs64, NSEC_PER_MSEC);
+       elapsed_msecs = elapsed_msecs64;
 
        if (todo) {
                printk("\n");
-               printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
+               printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
                       "(%d tasks refusing to freeze, wq_busy=%d):\n",
                       wakeup ? "aborted" : "failed",
-                      elapsed_csecs / 100, elapsed_csecs % 100,
+                      elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);
 
                if (!wakeup) {
@@ -96,8 +100,8 @@ static int try_to_freeze_tasks(bool user_only)
                        read_unlock(&tasklist_lock);
                }
        } else {
-               printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
-                       elapsed_csecs % 100);
+               printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
+                       elapsed_msecs % 1000);
        }
 
        return todo ? -EBUSY : 0;
index bef86d121eb2ca5bcba2001a447f38e90476c5b5..454568e6c8d280a59a9f2e3e0ed50c6af6f27f90 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
+#include <linux/rtc.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -358,6 +359,18 @@ static int enter_state(suspend_state_t state)
        return error;
 }
 
+static void pm_suspend_marker(char *annotation)
+{
+       struct timespec ts;
+       struct rtc_time tm;
+
+       getnstimeofday(&ts);
+       rtc_time_to_tm(ts.tv_sec, &tm);
+       pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+               annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+               tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -372,6 +385,7 @@ int pm_suspend(suspend_state_t state)
        if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
                return -EINVAL;
 
+       pm_suspend_marker("entry");
        error = enter_state(state);
        if (error) {
                suspend_stats.fail++;
@@ -379,6 +393,7 @@ int pm_suspend(suspend_state_t state)
        } else {
                suspend_stats.success++;
        }
+       pm_suspend_marker("exit");
        return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c
new file mode 100644 (file)
index 0000000..d2a65da
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * debugfs file to track time spent in suspend
+ *
+ * Copyright (c) 2011, Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+
+static struct timespec suspend_time_before;
+static unsigned int time_in_suspend_bins[32];
+
+#ifdef CONFIG_DEBUG_FS
+static int suspend_time_debug_show(struct seq_file *s, void *data)
+{
+       int bin;
+       seq_printf(s, "time (secs)  count\n");
+       seq_printf(s, "------------------\n");
+       for (bin = 0; bin < 32; bin++) {
+               if (time_in_suspend_bins[bin] == 0)
+                       continue;
+               seq_printf(s, "%4d - %4d %4u\n",
+                       bin ? 1 << (bin - 1) : 0, 1 << bin,
+                               time_in_suspend_bins[bin]);
+       }
+       return 0;
+}
+
+static int suspend_time_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, suspend_time_debug_show, NULL);
+}
+
+static const struct file_operations suspend_time_debug_fops = {
+       .open           = suspend_time_debug_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int __init suspend_time_debug_init(void)
+{
+       struct dentry *d;
+
+       d = debugfs_create_file("suspend_time", 0755, NULL, NULL,
+               &suspend_time_debug_fops);
+       if (!d) {
+               pr_err("Failed to create suspend_time debug file\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+late_initcall(suspend_time_debug_init);
+#endif
+
+static int suspend_time_syscore_suspend(void)
+{
+       read_persistent_clock(&suspend_time_before);
+
+       return 0;
+}
+
+static void suspend_time_syscore_resume(void)
+{
+       struct timespec after;
+
+       read_persistent_clock(&after);
+
+       after = timespec_sub(after, suspend_time_before);
+
+       time_in_suspend_bins[fls(after.tv_sec)]++;
+
+       pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
+               after.tv_nsec / NSEC_PER_MSEC);
+}
+
+static struct syscore_ops suspend_time_syscore_ops = {
+       .suspend = suspend_time_syscore_suspend,
+       .resume = suspend_time_syscore_resume,
+};
+
+static int suspend_time_syscore_init(void)
+{
+       register_syscore_ops(&suspend_time_syscore_ops);
+
+       return 0;
+}
+
+static void suspend_time_syscore_exit(void)
+{
+       unregister_syscore_ops(&suspend_time_syscore_ops);
+}
+module_init(suspend_time_syscore_init);
+module_exit(suspend_time_syscore_exit);
index 8f50de394d22b30090d1cf62cd3fd3069d451d57..c8fba3380076afd8db340855340e5886b8497dc6 100644 (file)
@@ -9,7 +9,6 @@
  * manipulate wakelocks on Android.
  */
 
-#include <linux/capability.h>
 #include <linux/ctype.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -189,9 +188,6 @@ int pm_wake_lock(const char *buf)
        size_t len;
        int ret = 0;
 
-       if (!capable(CAP_BLOCK_SUSPEND))
-               return -EPERM;
-
        while (*str && !isspace(*str))
                str++;
 
@@ -235,9 +231,6 @@ int pm_wake_unlock(const char *buf)
        size_t len;
        int ret = 0;
 
-       if (!capable(CAP_BLOCK_SUSPEND))
-               return -EPERM;
-
        len = strlen(buf);
        if (!len)
                return -EINVAL;
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644 (file)
index 0000000..187e4e9
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+
+#define MAX_WAKEUP_REASON_IRQS 32
+static int irq_list[MAX_WAKEUP_REASON_IRQS];
+static int irqcount;
+static struct kobject *wakeup_reason;
+static spinlock_t resume_reason_lock;
+
+static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+               char *buf)
+{
+       int irq_no, buf_offset = 0;
+       struct irq_desc *desc;
+       spin_lock(&resume_reason_lock);
+       for (irq_no = 0; irq_no < irqcount; irq_no++) {
+               desc = irq_to_desc(irq_list[irq_no]);
+               if (desc && desc->action && desc->action->name)
+                       buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+                                       irq_list[irq_no], desc->action->name);
+               else
+                       buf_offset += sprintf(buf + buf_offset, "%d\n",
+                                       irq_list[irq_no]);
+       }
+       spin_unlock(&resume_reason_lock);
+       return buf_offset;
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+
+static struct attribute *attrs[] = {
+       &resume_reason.attr,
+       NULL,
+};
+static struct attribute_group attr_group = {
+       .attrs = attrs,
+};
+
+/*
+ * Logs the wakeup reason to the kernel log and stores the IRQ so it can
+ * be exposed to userspace via sysfs.
+ */
+void log_wakeup_reason(int irq)
+{
+       struct irq_desc *desc;
+       desc = irq_to_desc(irq);
+       if (desc && desc->action && desc->action->name)
+               printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
+                               desc->action->name);
+       else
+               printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+
+       spin_lock(&resume_reason_lock);
+       if (irqcount == MAX_WAKEUP_REASON_IRQS) {
+               spin_unlock(&resume_reason_lock);
+               printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
+                               MAX_WAKEUP_REASON_IRQS);
+               return;
+       }
+
+       irq_list[irqcount++] = irq;
+       spin_unlock(&resume_reason_lock);
+}
+
+/* Detects a suspend and clears all the previous wakeup reasons */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+               unsigned long pm_event, void *unused)
+{
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               spin_lock(&resume_reason_lock);
+               irqcount = 0;
+               spin_unlock(&resume_reason_lock);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+       .notifier_call = wakeup_reason_pm_event,
+};
+
+/*
+ * Initializes the sysfs attributes and registers the pm_event notifier.
+ */
+int __init wakeup_reason_init(void)
+{
+       int retval;
+       spin_lock_init(&resume_reason_lock);
+       retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
+       if (retval)
+               printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
+                               __func__, retval);
+
+       wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+       if (!wakeup_reason) {
+               printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+                               __func__);
+               return 1;
+       }
+       retval = sysfs_create_group(wakeup_reason, &attr_group);
+       if (retval) {
+               kobject_put(wakeup_reason);
+               printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
+                               __func__, retval);
+       }
+       return 0;
+}
+
+late_initcall(wakeup_reason_init);
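/*
 * Editor's sketch (not part of the patch): the platform's resume path is
 * expected to call log_wakeup_reason() once per pending wakeup IRQ.
 * sketch_pending_wakeup_irq() is an assumption standing in for whatever
 * the interrupt controller reports.
 */
static void sketch_report_wakeup_irqs(void)
{
	int irq;

	while ((irq = sketch_pending_wakeup_irq()) >= 0)
		log_wakeup_reason(irq);
}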
index aa08f6419bebf375901703adcec398b62232955b..9408d236c78fee5f00a0e1eb70e3f88ed5addc55 100644 (file)
@@ -7138,13 +7138,24 @@ static inline int preempt_count_equals(int preempt_offset)
        return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+       __might_sleep_init_called = 1;
+       return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-           system_state != SYSTEM_RUNNING || oops_in_progress)
+           oops_in_progress)
+               return;
+       if (system_state != SYSTEM_RUNNING &&
+           (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
@@ -7750,6 +7761,23 @@ static void cpu_cgroup_css_offline(struct cgroup *cgrp)
        sched_offline_group(tg);
 }
 
+static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       const struct cred *cred = current_cred(), *tcred;
+       struct task_struct *task;
+
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               tcred = __task_cred(task);
+
+               if ((current != task) && !capable(CAP_SYS_NICE) &&
+                   !uid_eq(cred->euid, tcred->uid) &&
+                   !uid_eq(cred->euid, tcred->suid))
+                       return -EACCES;
+       }
+
+       return 0;
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -8116,6 +8144,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
+       .allow_attach   = cpu_cgroup_allow_attach,
        .exit           = cpu_cgroup_exit,
        .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,
index 113411bfe8b1205ad0f26556776f2a317e06eb0c..50e41075ac77105fd26d190b2068929af6c4becc 100644 (file)
@@ -2848,7 +2848,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);
 
-               timeout = schedule_timeout_interruptible(timeout);
+               timeout = freezable_schedule_timeout_interruptible(timeout);
 
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
index 2bbd9a73b54c27b0e75eb651d931f0987ff870ed..ab7fda5fbe188559fcb7924fa5944e8439c8e856 100644 (file)
@@ -42,6 +42,9 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2099,10 +2102,158 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
 }
 #endif
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+               struct vm_area_struct **prev,
+               unsigned long start, unsigned long end,
+               const char __user *name_addr)
+{
+       struct mm_struct * mm = vma->vm_mm;
+       int error = 0;
+       pgoff_t pgoff;
+
+       if (name_addr == vma_get_anon_name(vma)) {
+               *prev = vma;
+               goto out;
+       }
+
+       pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+       *prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+                               vma->vm_file, pgoff, vma_policy(vma),
+                               name_addr);
+       if (*prev) {
+               vma = *prev;
+               goto success;
+       }
+
+       *prev = vma;
+
+       if (start != vma->vm_start) {
+               error = split_vma(mm, vma, start, 1);
+               if (error)
+                       goto out;
+       }
+
+       if (end != vma->vm_end) {
+               error = split_vma(mm, vma, end, 0);
+               if (error)
+                       goto out;
+       }
+
+success:
+       if (!vma->vm_file)
+               vma->shared.anon_name = name_addr;
+
+out:
+       if (error == -ENOMEM)
+               error = -EAGAIN;
+       return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+                       unsigned long arg)
+{
+       unsigned long tmp;
+       struct vm_area_struct * vma, *prev;
+       int unmapped_error = 0;
+       int error = -EINVAL;
+
+       /*
+        * If the interval [start,end) covers some unmapped address
+        * ranges, just ignore them, but return -ENOMEM at the end.
+        * - this matches the handling in madvise.
+        */
+       vma = find_vma_prev(current->mm, start, &prev);
+       if (vma && start > vma->vm_start)
+               prev = vma;
+
+       for (;;) {
+               /* Still start < end. */
+               error = -ENOMEM;
+               if (!vma)
+                       return error;
+
+               /* Here start < (end|vma->vm_end). */
+               if (start < vma->vm_start) {
+                       unmapped_error = -ENOMEM;
+                       start = vma->vm_start;
+                       if (start >= end)
+                               return error;
+               }
+
+               /* Here vma->vm_start <= start < (end|vma->vm_end) */
+               tmp = vma->vm_end;
+               if (end < tmp)
+                       tmp = end;
+
+               /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+               error = prctl_update_vma_anon_name(vma, &prev, start, end,
+                               (const char __user *)arg);
+               if (error)
+                       return error;
+               start = tmp;
+               if (prev && start < prev->vm_end)
+                       start = prev->vm_end;
+               error = unmapped_error;
+               if (start >= end)
+                       return error;
+               if (prev)
+                       vma = prev->vm_next;
+               else    /* madvise_remove dropped mmap_sem */
+                       vma = find_vma(current->mm, start);
+       }
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+               unsigned long len_in, unsigned long arg)
+{
+       struct mm_struct *mm = current->mm;
+       int error;
+       unsigned long len;
+       unsigned long end;
+
+       if (start & ~PAGE_MASK)
+               return -EINVAL;
+       len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+       /* Check whether len was rounded up from a small negative to zero */
+       if (len_in && !len)
+               return -EINVAL;
+
+       end = start + len;
+       if (end < start)
+               return -EINVAL;
+
+       if (end == start)
+               return 0;
+
+       down_write(&mm->mmap_sem);
+
+       switch (opt) {
+       case PR_SET_VMA_ANON_NAME:
+               error = prctl_set_vma_anon_name(start, end, arg);
+               break;
+       default:
+               error = -EINVAL;
+       }
+
+       up_write(&mm->mmap_sem);
+
+       return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+               unsigned long len_in, unsigned long arg)
+{
+       return -EINVAL;
+}
+#endif
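/*
 * Editor's sketch (not part of the patch, userspace): naming an anonymous
 * mapping through the new prctl.  The kernel stores the user pointer, so
 * the string must stay valid for the life of the mapping; the region then
 * appears in /proc/<pid>/maps as "[anon:sketch heap]".
 */
#include <sys/mman.h>
#include <sys/prctl.h>

static void *sketch_named_anon(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		      (unsigned long)p, len, (unsigned long)"sketch heap");
	return p;
}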
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                unsigned long, arg4, unsigned long, arg5)
 {
        struct task_struct *me = current;
+       struct task_struct *tsk;
        unsigned char comm[sizeof(me->comm)];
        long error;
 
@@ -2226,6 +2377,26 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        else
                                return -EINVAL;
                        break;
+               case PR_SET_TIMERSLACK_PID:
+                       if (current->pid != (pid_t)arg3 &&
+                                       !capable(CAP_SYS_NICE))
+                               return -EPERM;
+                       rcu_read_lock();
+                       tsk = find_task_by_pid_ns((pid_t)arg3, &init_pid_ns);
+                       if (tsk == NULL) {
+                               rcu_read_unlock();
+                               return -EINVAL;
+                       }
+                       get_task_struct(tsk);
+                       rcu_read_unlock();
+                       if (arg2 <= 0)
+                               tsk->timer_slack_ns =
+                                       tsk->default_timer_slack_ns;
+                       else
+                               tsk->timer_slack_ns = arg2;
+                       put_task_struct(tsk);
+                       error = 0;
+                       break;
                default:
                        return -EINVAL;
                }
@@ -2262,6 +2433,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                if (arg2 || arg3 || arg4 || arg5)
                        return -EINVAL;
                return current->no_new_privs ? 1 : 0;
+       case PR_SET_VMA:
+               error = prctl_set_vma(arg2, arg3, arg4, arg5);
+               break;
        default:
                error = -EINVAL;
                break;
index 9469f4c61a30ae67c50a4335a517f4d8074c7015..34b8f77b4c0212662448a102706b37f30b01cdb7 100644 (file)
@@ -105,6 +105,8 @@ extern char core_pattern[];
 extern unsigned int core_pipe_limit;
 #endif
 extern int pid_max;
+extern int extra_free_kbytes;
+extern int min_free_order_shift;
 extern int pid_max_min, pid_max_max;
 extern int percpu_pagelist_fraction;
 extern int compat_log;
@@ -1281,6 +1283,21 @@ static struct ctl_table vm_table[] = {
                .proc_handler   = min_free_kbytes_sysctl_handler,
                .extra1         = &zero,
        },
+       {
+               .procname       = "extra_free_kbytes",
+               .data           = &extra_free_kbytes,
+               .maxlen         = sizeof(extra_free_kbytes),
+               .mode           = 0644,
+               .proc_handler   = min_free_kbytes_sysctl_handler,
+               .extra1         = &zero,
+       },
+       {
+               .procname       = "min_free_order_shift",
+               .data           = &min_free_order_shift,
+               .maxlen         = sizeof(min_free_order_shift),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "percpu_pagelist_fraction",
                .data           = &percpu_pagelist_fraction,
index 294bf4ef1f47690151d6d3261e45a32284c3f349..28ef74529cb571d6077230e918a0a38f513e25c1 100644 (file)
@@ -199,6 +199,12 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 
 }
 
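+/**
+ * alarm_expires_remaining - Return the time remaining until the alarm expires
+ * @alarm: ptr to the alarm
+ */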
+ktime_t alarm_expires_remaining(const struct alarm *alarm)
+{
+       struct alarm_base *base = &alarm_bases[alarm->type];
+       return ktime_sub(alarm->node.expires, base->gettime());
+}
+
 #ifdef CONFIG_RTC_CLASS
 /**
  * alarmtimer_suspend - Suspend time callback
@@ -305,7 +311,7 @@ void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
 }
 
 /**
- * alarm_start - Sets an alarm to fire
+ * alarm_start - Sets an absolute alarm to fire
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
  */
@@ -324,6 +330,31 @@ int alarm_start(struct alarm *alarm, ktime_t start)
        return ret;
 }
 
+/**
+ * alarm_start_relative - Sets a relative alarm to fire
+ * @alarm: ptr to alarm to set
+ * @start: time relative to now to run the alarm
+ */
+int alarm_start_relative(struct alarm *alarm, ktime_t start)
+{
+       struct alarm_base *base = &alarm_bases[alarm->type];
+
+       start = ktime_add(start, base->gettime());
+       return alarm_start(alarm, start);
+}
+
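+/**
+ * alarm_restart - Restart an alarm at its previously stored expiry time
+ * @alarm: ptr to alarm to restart
+ */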
+void alarm_restart(struct alarm *alarm)
+{
+       struct alarm_base *base = &alarm_bases[alarm->type];
+       unsigned long flags;
+
+       spin_lock_irqsave(&base->lock, flags);
+       hrtimer_set_expires(&alarm->timer, alarm->node.expires);
+       hrtimer_restart(&alarm->timer);
+       alarmtimer_enqueue(base, alarm);
+       spin_unlock_irqrestore(&base->lock, flags);
+}
+
 /**
  * alarm_try_to_cancel - Tries to cancel an alarm timer
  * @alarm: ptr to alarm to be canceled
@@ -394,6 +425,12 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
        return overrun;
 }
 
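+/**
+ * alarm_forward_now - Forward the alarm expiry in intervals from now
+ * @alarm: ptr to alarm to forward
+ * @interval: interval to forward by
+ */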
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
+{
+       struct alarm_base *base = &alarm_bases[alarm->type];
+
+       return alarm_forward(alarm, base->gettime(), interval);
+}
 
 
 
index 015f85aaca08f5f5d6eb55af1f1aab46670bb03b..e24c188cbbcce3be975cbfb1be94e31a701c781c 100644 (file)
@@ -82,6 +82,9 @@ config EVENT_TRACING
        select CONTEXT_SWITCH_TRACER
        bool
 
+config GPU_TRACEPOINTS
+       bool
+
 config CONTEXT_SWITCH_TRACER
        bool
 
index d7e2068e4b71ce98184c2f7c24bd470c457f2dae..45012122fbb6cb30eb42e62d57c1aa4a293cd64b 100644 (file)
@@ -60,5 +60,6 @@ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/gpu-traces.c b/kernel/trace/gpu-traces.c
new file mode 100644 (file)
index 0000000..a4b3f00
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * GPU tracepoints
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_sched_switch);
+EXPORT_TRACEPOINT_SYMBOL(gpu_job_enqueue);
index 18cdf91b2f853d34ab09c1ae90f1d7c0ba2e300a..3bf9864c313e17c370023547a7cccda69fedcbf2 100644 (file)
@@ -730,6 +730,7 @@ static const char *trace_options[] = {
        "irq-info",
        "markers",
        "function-trace",
+       "print-tgid",
        NULL
 };
 
@@ -1242,6 +1243,7 @@ void tracing_reset_all_online_cpus(void)
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
+static unsigned saved_tgids[SAVED_CMDLINES];
 static int cmdline_idx;
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
@@ -1443,6 +1445,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
        }
 
        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+       saved_tgids[idx] = tsk->tgid;
 
        arch_spin_unlock(&trace_cmdline_lock);
 
@@ -1480,6 +1483,25 @@ void trace_find_cmdline(int pid, char comm[])
        preempt_enable();
 }
 
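+/* Return the tgid recorded for @pid, or -1 if no saved cmdline entry exists. */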
+int trace_find_tgid(int pid)
+{
+       unsigned map;
+       int tgid;
+
+       preempt_disable();
+       arch_spin_lock(&trace_cmdline_lock);
+       map = map_pid_to_cmdline[pid];
+       if (map != NO_CMDLINE_MAP)
+               tgid = saved_tgids[map];
+       else
+               tgid = -1;
+
+       arch_spin_unlock(&trace_cmdline_lock);
+       preempt_enable();
+
+       return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -2435,6 +2457,13 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
        seq_puts(m, "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+       print_event_info(buf, m);
+       seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
+       seq_puts(m, "#              | |        |      |          |         |\n");
+}
+
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
        print_event_info(buf, m);
@@ -2447,6 +2476,18 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
        seq_puts(m, "#              | |       |   ||||       |         |\n");
 }
 
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+       print_event_info(buf, m);
+       seq_puts(m, "#                                      _-----=> irqs-off\n");
+       seq_puts(m, "#                                     / _----=> need-resched\n");
+       seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
+       seq_puts(m, "#                                    || / _--=> preempt-depth\n");
+       seq_puts(m, "#                                    ||| /     delay\n");
+       seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+       seq_puts(m, "#              | |        |      |   ||||       |         |\n");
+}
+
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
@@ -2747,9 +2788,15 @@ void trace_default_header(struct seq_file *m)
        } else {
                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
                        if (trace_flags & TRACE_ITER_IRQ_INFO)
-                               print_func_help_header_irq(iter->trace_buffer, m);
+                               if (trace_flags & TRACE_ITER_TGID)
+                                       print_func_help_header_irq_tgid(iter->trace_buffer, m);
+                               else
+                                       print_func_help_header_irq(iter->trace_buffer, m);
                        else
-                               print_func_help_header(iter->trace_buffer, m);
+                               if (trace_flags & TRACE_ITER_TGID)
+                                       print_func_help_header_tgid(iter->trace_buffer, m);
+                               else
+                                       print_func_help_header(iter->trace_buffer, m);
                }
        }
 }
@@ -3601,9 +3648,53 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 }
 
 static const struct file_operations tracing_saved_cmdlines_fops = {
-    .open       = tracing_open_generic,
-    .read       = tracing_saved_cmdlines_read,
-    .llseek    = generic_file_llseek,
+       .open   = tracing_open_generic,
+       .read   = tracing_saved_cmdlines_read,
+       .llseek = generic_file_llseek,
+};
+
+static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+                               size_t cnt, loff_t *ppos)
+{
+       char *file_buf;
+       char *buf;
+       int len = 0;
+       int pid;
+       int i;
+
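+       /* Worst case per entry: one "pid tgid\n" pair, roughly 16 digits each */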
+       file_buf = kmalloc(SAVED_CMDLINES*(16+1+16), GFP_KERNEL);
+       if (!file_buf)
+               return -ENOMEM;
+
+       buf = file_buf;
+
+       for (i = 0; i < SAVED_CMDLINES; i++) {
+               int tgid;
+               int r;
+
+               pid = map_cmdline_to_pid[i];
+               if (pid == -1 || pid == NO_CMDLINE_MAP)
+                       continue;
+
+               tgid = trace_find_tgid(pid);
+               r = sprintf(buf, "%d %d\n", pid, tgid);
+               buf += r;
+               len += r;
+       }
+
+       len = simple_read_from_buffer(ubuf, cnt, ppos,
+                                     file_buf, len);
+
+       kfree(file_buf);
+
+       return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+       .open   = tracing_open_generic,
+       .read   = tracing_saved_tgids_read,
+       .llseek = generic_file_llseek,
 };
 
 static ssize_t
@@ -6157,6 +6248,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
        trace_create_file("trace_marker", 0220, d_tracer,
                          tr, &tracing_mark_fops);
 
+       trace_create_file("saved_tgids", 0444, d_tracer,
+                         tr, &tracing_saved_tgids_fops);
+
        trace_create_file("trace_clock", 0644, d_tracer, tr,
                          &trace_clock_fops);
 
index aa0e736b72ac84d77a633ab8c83f8d1378883147..79462443658dcb614ec1249d9ab60df32fbbac82 100644 (file)
@@ -653,6 +653,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 extern cycle_t ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
+extern int trace_find_tgid(int pid);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -866,6 +867,7 @@ enum trace_iterator_flags {
        TRACE_ITER_IRQ_INFO             = 0x800000,
        TRACE_ITER_MARKERS              = 0x1000000,
        TRACE_ITER_FUNCTION             = 0x2000000,
+       TRACE_ITER_TGID                 = 0x4000000,
 };
 
 /*
index 8388bc99f2eee165efacbef7ddde4f7f21d49ea3..28dd40c2c423e45d5eff084bf397829082986c03 100644 (file)
@@ -46,6 +46,8 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_DURATION     0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME     0x20
 #define TRACE_GRAPH_PRINT_IRQS         0x40
+#define TRACE_GRAPH_PRINT_FLAT         0x80
 
 static unsigned int max_depth;
 
@@ -64,6 +66,8 @@ static struct tracer_opt trace_opts[] = {
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
+       /* Use standard trace formatting rather than hierarchical */
+       { TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
        { } /* Empty entry */
 };
 
@@ -1234,6 +1238,9 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
        int cpu = iter->cpu;
        int ret;
 
+       if (flags & TRACE_GRAPH_PRINT_FLAT)
+               return TRACE_TYPE_UNHANDLED;
+
        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
@@ -1291,13 +1298,6 @@ print_graph_function(struct trace_iterator *iter)
        return print_graph_function_flags(iter, tracer_flags.val);
 }
 
-static enum print_line_t
-print_graph_function_event(struct trace_iterator *iter, int flags,
-                          struct trace_event *event)
-{
-       return print_graph_function(iter);
-}
-
 static void print_lat_header(struct seq_file *s, u32 flags)
 {
        static const char spaces[] = "                " /* 16 spaces */
@@ -1364,6 +1364,11 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
        struct trace_iterator *iter = s->private;
 
+       if (flags & TRACE_GRAPH_PRINT_FLAT) {
+               trace_default_header(s);
+               return;
+       }
+
        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;
 
@@ -1434,20 +1439,6 @@ static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
        return 0;
 }
 
-static struct trace_event_functions graph_functions = {
-       .trace          = print_graph_function_event,
-};
-
-static struct trace_event graph_trace_entry_event = {
-       .type           = TRACE_GRAPH_ENT,
-       .funcs          = &graph_functions,
-};
-
-static struct trace_event graph_trace_ret_event = {
-       .type           = TRACE_GRAPH_RET,
-       .funcs          = &graph_functions
-};
-
 static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
@@ -1523,16 +1514,6 @@ static __init int init_graph_trace(void)
 {
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
-       if (!register_ftrace_event(&graph_trace_entry_event)) {
-               pr_warning("Warning: could not register graph trace events\n");
-               return 1;
-       }
-
-       if (!register_ftrace_event(&graph_trace_ret_event)) {
-               pr_warning("Warning: could not register graph trace events\n");
-               return 1;
-       }
-
        return register_tracer(&graph_trace);
 }
 
index bb922d9ee51ba7b51171db284f6cd20576b012b2..a68e5e34c00b022669f2b828a756517bf8d2242c 100644 (file)
@@ -702,11 +702,25 @@ int trace_print_context(struct trace_iterator *iter)
        unsigned long secs, usec_rem;
        char comm[TASK_COMM_LEN];
        int ret;
+       int tgid;
 
        trace_find_cmdline(entry->pid, comm);
 
-       ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
-                              comm, entry->pid, iter->cpu);
+       ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+       if (!ret)
+               return 0;
+
+       if (trace_flags & TRACE_ITER_TGID) {
+               tgid = trace_find_tgid(entry->pid);
+               if (tgid < 0)
+                       ret = trace_seq_puts(s, "(-----) ");
+               else
+                       ret = trace_seq_printf(s, "(%5d) ", tgid);
+               if (!ret)
+                       return 0;
+       }
+
+       ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
        if (!ret)
                return 0;
 
@@ -1035,6 +1049,168 @@ static struct trace_event trace_fn_event = {
        .funcs          = &trace_fn_funcs,
 };
 
+/* TRACE_GRAPH_ENT */
+static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
+                                       struct trace_event *event)
+{
+       struct trace_seq *s = &iter->seq;
+       struct ftrace_graph_ent_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_puts(s, "graph_ent: func="))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_puts(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_printf(&iter->seq, "%lx %d\n",
+                             field->graph_ent.func,
+                             field->graph_ent.depth))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.func);
+       SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ent_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD_RET(s, field->graph_ent.func);
+       SEQ_PUT_FIELD_RET(s, field->graph_ent.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ent_funcs = {
+       .trace          = trace_graph_ent_trace,
+       .raw            = trace_graph_ent_raw,
+       .hex            = trace_graph_ent_hex,
+       .binary         = trace_graph_ent_bin,
+};
+
+static struct trace_event trace_graph_ent_event = {
+       .type           = TRACE_GRAPH_ENT,
+       .funcs          = &trace_graph_ent_funcs,
+};
+
+/* TRACE_GRAPH_RET */
+static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
+                                       struct trace_event *event)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       struct ftrace_graph_ret_entry *field;
+
+       trace_assign_type(field, entry);
+
+       if (!trace_seq_puts(s, "graph_ret: func="))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!seq_print_ip_sym(s, field->ret.func, flags))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_puts(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+
+       trace_assign_type(field, iter->ent);
+
+       if (!trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
+                             field->ret.func,
+                             field->ret.calltime,
+                             field->ret.rettime,
+                             field->ret.overrun,
+                             field->ret.depth))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_HEX_FIELD_RET(s, field->ret.func);
+       SEQ_PUT_HEX_FIELD_RET(s, field->ret.calltime);
+       SEQ_PUT_HEX_FIELD_RET(s, field->ret.rettime);
+       SEQ_PUT_HEX_FIELD_RET(s, field->ret.overrun);
+       SEQ_PUT_HEX_FIELD_RET(s, field->ret.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
+                                     struct trace_event *event)
+{
+       struct ftrace_graph_ret_entry *field;
+       struct trace_seq *s = &iter->seq;
+
+       trace_assign_type(field, iter->ent);
+
+       SEQ_PUT_FIELD_RET(s, field->ret.func);
+       SEQ_PUT_FIELD_RET(s, field->ret.calltime);
+       SEQ_PUT_FIELD_RET(s, field->ret.rettime);
+       SEQ_PUT_FIELD_RET(s, field->ret.overrun);
+       SEQ_PUT_FIELD_RET(s, field->ret.depth);
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ret_funcs = {
+       .trace          = trace_graph_ret_trace,
+       .raw            = trace_graph_ret_raw,
+       .hex            = trace_graph_ret_hex,
+       .binary         = trace_graph_ret_bin,
+};
+
+static struct trace_event trace_graph_ret_event = {
+       .type           = TRACE_GRAPH_RET,
+       .funcs          = &trace_graph_ret_funcs,
+};
+
 /* TRACE_CTX an TRACE_WAKE */
 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
@@ -1425,6 +1601,8 @@ static struct trace_event trace_print_event = {
 
 static struct trace_event *events[] __initdata = {
        &trace_fn_event,
+       &trace_graph_ent_event,
+       &trace_graph_ret_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_stack_event,
index 05039e348f07e242c63392a8758b8371c1a42beb..e092e5a6cdd753a0368215204b33daec16b7c2ba 100644 (file)
@@ -45,6 +45,11 @@ static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
@@ -178,7 +183,7 @@ void touch_softlockup_watchdog_sync(void)
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
@@ -192,6 +197,76 @@ static int is_hardlockup(void)
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
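+/*
+ * Return the cpu after @cpu in the watchdog cpumask, wrapping around;
+ * returns nr_cpu_ids if @cpu is the only cpu being watched.
+ */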
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+       cpumask_t cpus = watchdog_cpus;
+       unsigned int next_cpu;
+
+       next_cpu = cpumask_next(cpu, &cpus);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(&cpus);
+
+       if (next_cpu == cpu)
+               return nr_cpu_ids;
+
+       return next_cpu;
+}
+
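+/*
+ * A cpu is considered hard locked up if its hrtimer interrupt count
+ * has not advanced since the previous check.
+ */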
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+       unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+       if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+               return 1;
+
+       per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+       return 0;
+}
+
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+       unsigned int next_cpu;
+
+       /*
+        * Test for hardlockups every 3 samples.  The sample period is
+        *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+        *  watchdog_thresh (over by 20%).
+        */
+       if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+               return;
+
+       /* check for a hardlockup on the next cpu */
+       next_cpu = watchdog_next_cpu(smp_processor_id());
+       if (next_cpu >= nr_cpu_ids)
+               return;
+
+       smp_rmb();
+
+       if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+               per_cpu(watchdog_nmi_touch, next_cpu) = false;
+               return;
+       }
+
+       if (is_hardlockup_other_cpu(next_cpu)) {
+               /* only warn once */
+               if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+                       return;
+
+               if (hardlockup_panic)
+                       panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+               else
+                       WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+               per_cpu(hard_watchdog_warn, next_cpu) = true;
+       } else {
+               per_cpu(hard_watchdog_warn, next_cpu) = false;
+       }
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
 static int is_softlockup(unsigned long touch_ts)
 {
        unsigned long now = get_timestamp();
@@ -203,7 +278,7 @@ static int is_softlockup(unsigned long touch_ts)
        return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
@@ -251,7 +326,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
        __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -271,6 +346,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
+       /* test for hardlockups on the next cpu */
+       watchdog_check_hardlockup_other_cpu();
+
        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -395,7 +473,7 @@ static void watchdog(unsigned int cpu)
        __touch_watchdog();
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -471,9 +549,44 @@ static void watchdog_nmi_disable(unsigned int cpu)
        return;
 }
 #else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+       /*
+        * The new cpu will be marked online before the first hrtimer interrupt
+        * runs on it.  If another cpu tests for a hardlockup on the new cpu
+        * before it has run its first hrtimer, it will get a false positive.
+        * Touch the watchdog on the new cpu to delay the first check for at
+        * least 3 sampling periods to guarantee one hrtimer has run on the new
+        * cpu.
+        */
+       per_cpu(watchdog_nmi_touch, cpu) = true;
+       smp_wmb();
+       cpumask_set_cpu(cpu, &watchdog_cpus);
+       return 0;
+}
+
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+       unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+       /*
+        * Offlining this cpu will cause the cpu before this one to start
+        * checking the one after this one.  If this cpu just finished checking
+        * the next cpu and updating hrtimer_interrupts_saved, and then the
+        * previous cpu checks it within one sample period, it will trigger a
+        * false positive.  Touch the watchdog on the next cpu to prevent it.
+        */
+       if (next_cpu < nr_cpu_ids)
+               per_cpu(watchdog_nmi_touch, next_cpu) = true;
+       smp_wmb();
+       cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
+#else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 /* prepare/enable/disable routines */
 /* sysctl functions */
index 74fdc5cf4adc44cc755e2dab5018a61df288e078..d317c1ad62ab0c747e9d77dfb8eda6d813581397 100644 (file)
@@ -191,15 +191,27 @@ config LOCKUP_DETECTOR
          The overhead should be minimal.  A periodic hrtimer runs to
          generate interrupts and kick the watchdog task every 4 seconds.
          An NMI is generated every 10 seconds or so to check for hardlockups.
+         If NMIs are not available on the platform, every 12 seconds the
+         hrtimer interrupt on one cpu will be used to check for hardlockups
+         on the next cpu.
 
          The frequency of hrtimer and NMI events and the soft and hard lockup
          thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
        def_bool y
        depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
        depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+       def_bool y
+       depends on LOCKUP_DETECTOR && SMP
+       depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+       def_bool y
+       depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
 config BOOTPARAM_HARDLOCKUP_PANIC
        bool "Panic (Reboot) On Hard Lockups"
        depends on HARDLOCKUP_DETECTOR
@@ -669,8 +681,9 @@ config DEBUG_LOCKING_API_SELFTESTS
          mutexes and rwsems.
 
 config STACKTRACE
-       bool
+       bool "Stacktrace"
        depends on STACKTRACE_SUPPORT
+       default y
 
 config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
index 7055883e6e250f2914ed5c0a7814400e4945c9d1..85d1a5427c27faeff4f4d8d3b52cba058685022f 100644 (file)
@@ -102,7 +102,8 @@ static long madvise_behavior(struct vm_area_struct * vma,
 
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
-                               vma->vm_file, pgoff, vma_policy(vma));
+                               vma->vm_file, pgoff, vma_policy(vma),
+                               vma_get_anon_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index b2061bb5af73b0d589fbf3bb2a1615e7e798cb27..420e4b97ffab5853d6e543080c612b341a171ce3 100644 (file)
@@ -725,7 +725,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                  vma->anon_vma, vma->vm_file, pgoff,
-                                 new_pol);
+                                 new_pol, vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
index 713e462c077637664c80287ab6e864ff54b2eaab..3dcea72277be8988ce2df2f185eadb2fc8b05d7a 100644 (file)
@@ -289,7 +289,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
-                         vma->vm_file, pgoff, vma_policy(vma));
+                         vma->vm_file, pgoff, vma_policy(vma),
+                         vma_get_anon_name(vma));
        if (*prev) {
                vma = *prev;
                goto success;
index 8f87b14c796839a99851af83de6ee15f48f0b16d..9aa554b7e620e1c0596a45035fbf0ebc3372dd0e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -893,7 +893,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-                       struct file *file, unsigned long vm_flags)
+                       struct file *file, unsigned long vm_flags,
+                       const char __user *anon_name)
 {
        if (vma->vm_flags ^ vm_flags)
                return 0;
@@ -901,6 +902,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
+       if (vma_get_anon_name(vma) != anon_name)
+               return 0;
        return 1;
 }
 
@@ -931,9 +934,10 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+       const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags) &&
+       if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
@@ -950,9 +954,10 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+       const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags) &&
+       if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -963,9 +968,9 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -995,7 +1000,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
-                       pgoff_t pgoff, struct mempolicy *policy)
+                       pgoff_t pgoff, struct mempolicy *policy,
+                       const char __user *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
@@ -1021,15 +1027,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
-                       can_vma_merge_after(prev, vm_flags,
-                                               anon_vma, file, pgoff)) {
+                       can_vma_merge_after(prev, vm_flags, anon_vma,
+                                               file, pgoff, anon_name)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
-                               can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen) &&
+                               can_vma_merge_before(next, vm_flags, anon_vma,
+                                               file, pgoff+pglen, anon_name) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
@@ -1049,8 +1055,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
-                       can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen)) {
+                       can_vma_merge_before(next, vm_flags, anon_vma,
+                                       file, pgoff+pglen, anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
@@ -1519,7 +1525,8 @@ munmap_back:
        /*
         * Can we just expand an old mapping?
         */
-       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+                       NULL, NULL);
        if (vma)
                goto out;
 
@@ -2663,7 +2670,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
        /* Can we just expand an old private anonymous mapping? */
        vma = vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL);
+                                       NULL, NULL, pgoff, NULL, NULL);
        if (vma)
                goto out;
 
@@ -2821,7 +2828,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+                       vma_get_anon_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
index e9f65aaa318261320cdfd11baed541c52634b9f3..3c44c971f9f34b631af0fe239d1c0c0db7b3c23f 100644 (file)
@@ -278,7 +278,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
-                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+                       vma_get_anon_name(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
index 71305c6aba5bb49c8fa4b901181b2de28dfb02b4..83fa999d1de1f8578ef287ee46d1a81043ba5b28 100644 (file)
@@ -196,7 +196,20 @@ static char * const zone_names[MAX_NR_ZONES] = {
         "Movable",
 };
 
+/*
+ * Try to keep at least this much lowmem free.  Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
+
+/*
+ * Extra memory for the system to try freeing. Used to temporarily
+ * free memory, to make space for new workloads. Anyone can allocate
+ * down to the min watermarks controlled by min_free_kbytes above.
+ */
+int extra_free_kbytes = 0;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1650,7 +1663,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                free_pages -= z->free_area[o].nr_free << o;
 
                /* Require fewer higher order pages to be free */
-               min >>= 1;
+               min >>= min_free_order_shift;
 
                if (free_pages <= min)
                        return false;
@@ -5322,6 +5335,7 @@ static void setup_per_zone_lowmem_reserve(void)
 static void __setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+       unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
        struct zone *zone;
        unsigned long flags;
@@ -5333,11 +5347,14 @@ static void __setup_per_zone_wmarks(void)
        }
 
        for_each_zone(zone) {
-               u64 tmp;
+               u64 min, low;
 
                spin_lock_irqsave(&zone->lock, flags);
-               tmp = (u64)pages_min * zone->managed_pages;
-               do_div(tmp, lowmem_pages);
+               min = (u64)pages_min * zone->managed_pages;
+               do_div(min, lowmem_pages);
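+               /* Spread extra_free_kbytes over zones by their share of all pages */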
+               low = (u64)pages_low * zone->managed_pages;
+               do_div(low, vm_total_pages);
+
                if (is_highmem(zone)) {
                        /*
                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -5358,11 +5375,13 @@ static void __setup_per_zone_wmarks(void)
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->watermark[WMARK_MIN] = tmp;
+                       zone->watermark[WMARK_MIN] = min;
                }
 
-               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
-               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) +
+                                       low + (min >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
+                                       low + (min >> 1);
 
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
@@ -5475,7 +5494,7 @@ module_init(init_per_zone_wmark_min)
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
  *     that we can call two helper functions whenever min_free_kbytes
- *     changes.
+ *     or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
        void __user *buffer, size_t *length, loff_t *ppos)
index a8a3ef45fed753b68ac1cc4a94c9260979a37879..ba05b64e5d8ddfc19f31c046f7c0b735d099364b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>
 #include <linux/frontswap.h>
 #include <linux/aio.h>
+#include <linux/blkdev.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -80,9 +81,54 @@ void end_swap_bio_read(struct bio *bio, int err)
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
-       } else {
-               SetPageUptodate(page);
+               goto out;
        }
+
+       SetPageUptodate(page);
+
+       /*
+        * There is no guarantee that the page is in swap cache - the software
+        * suspend code (at least) uses end_swap_bio_read() against a non-
+        * swapcache page.  So we must check PG_swapcache before proceeding with
+        * this optimization.
+        */
+       if (likely(PageSwapCache(page))) {
+               struct swap_info_struct *sis;
+
+               sis = page_swap_info(page);
+               if (sis->flags & SWP_BLKDEV) {
+                       /*
+                        * The swap subsystem performs lazy swap slot freeing,
+                        * expecting that the page will be swapped out again.
+                        * So we can avoid an unnecessary write if the page
+                        * isn't redirtied.
+                        * This is good for real swap storage because we can
+                        * reduce unnecessary I/O and enhance wear-leveling
+                        * if an SSD is used as the swap device.
+                        * But if an in-memory swap device (e.g. zram) is used,
+                        * this causes a duplicated copy between uncompressed
+                        * data in VM-owned memory and compressed data in
+                        * zram-owned memory.  So let's free zram-owned memory
+                        * and make the VM-owned decompressed page *dirty*,
+                        * so the page should be swapped out somewhere again if
+                        * we again wish to reclaim it.
+                        */
+                       struct gendisk *disk = sis->bdev->bd_disk;
+                       if (disk->fops->swap_slot_free_notify) {
+                               swp_entry_t entry;
+                               unsigned long offset;
+
+                               entry.val = page_private(page);
+                               offset = swp_offset(entry);
+
+                               SetPageDirty(page);
+                               disk->fops->swap_slot_free_notify(sis->bdev,
+                                               offset);
+                       }
+               }
+       }
+
+out:
        unlock_page(page);
        bio_put(bio);
 }
index 16cc1d77f70a0e6ae0b63c8599300d7a3db00289..19106724cad840904f6121e3a73ceac27d6129ce 100644 (file)
@@ -3024,6 +3024,14 @@ put_memory:
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
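+/*
+ * Install @file as the backing file of @vma, dropping any file reference
+ * the vma already held, and switch the vma to the shmem vm_ops.
+ */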
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       vma->vm_file = file;
+       vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3037,10 +3045,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       if (vma->vm_file)
-               fput(vma->vm_file);
-       vma->vm_file = file;
-       vma->vm_ops = &shmem_vm_ops;
+       shmem_set_file(vma, file);
        return 0;
 }
 
index 4e89500391dcbeb6a2ac13e0979e788e9caf0593..ec826b383b625b2840b31af6722bbf61f4d05f2c 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/sysctl.h>
 #include <linux/oom.h>
 #include <linux/prefetch.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -155,6 +156,39 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
        return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
+struct dentry *debug_file;
+
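+/* List each registered shrinker and the object count it reports as freeable. */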
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+       struct shrinker *shrinker;
+       struct shrink_control sc;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       down_read(&shrinker_rwsem);
+       list_for_each_entry(shrinker, &shrinker_list, list) {
+               int num_objs;
+
+               num_objs = shrinker->shrink(shrinker, &sc);
+               seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
+       }
+       up_read(&shrinker_rwsem);
+       return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+       .open = debug_shrinker_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -167,6 +201,15 @@ void register_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(register_shrinker);
 
+static int __init add_shrinker_debug(void)
+{
+       debugfs_create_file("shrinker", 0644, NULL, NULL,
+                           &debug_shrinker_fops);
+       return 0;
+}
+
+late_initcall(add_shrinker_debug);
+
 /*
  * Remove one
  */
index 2ddc9046868e7a19d06841054696773242099ccf..2a680dadfd6cc7effd3bcc23c34c6ed51764a3b5 100644 (file)
@@ -81,6 +81,20 @@ source "net/netlabel/Kconfig"
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+       bool "Only allow certain groups to create sockets"
+       default ANDROID
+       help
+        Restricts the creation of certain socket families to processes
+        in the appropriate groups or with the appropriate capabilities,
+        following the Android permission model.
+
+config NET_ACTIVITY_STATS
+       bool "Network activity statistics tracking"
+       default ANDROID
+       help
+        Network activity statistics are useful for tracking wireless
+        modem activity on 2G, 3G and 4G wireless networks. Counts the
+        number of transmissions and groups them into specified time buckets.
+
 config NETWORK_SECMARK
        bool "Security Marking"
        help
@@ -220,7 +234,7 @@ source "net/vmw_vsock/Kconfig"
 source "net/netlink/Kconfig"
 
 config RPS
-       boolean
+       boolean "RPS"
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
index 091e7b04f301539036dca468d045d0615a33f4b4..67d460aa1c48d9e5aa98fa9b7b5181d3954480fd 100644 (file)
@@ -70,3 +70,4 @@ obj-$(CONFIG_BATMAN_ADV)      += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
+obj-$(CONFIG_NET_ACTIVITY_STATS)               += activity_stats.o
diff --git a/net/activity_stats.c b/net/activity_stats.c
new file mode 100644 (file)
index 0000000..4609ce2
--- /dev/null
@@ -0,0 +1,119 @@
+/* net/activity_stats.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ */
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/suspend.h>
+#include <net/net_namespace.h>
+
+/*
+ * Track transmission rates in buckets (power of 2).
+ * 1,2,4,8...512 seconds.
+ *
+ * Buckets represent the count of network transmissions at least
+ * N seconds apart, where N is 1 << bucket index.
+ */
+#define BUCKET_MAX 10
+
+/* Track network activity frequency */
+static unsigned long activity_stats[BUCKET_MAX];
+static ktime_t last_transmit;
+static ktime_t suspend_time;
+static DEFINE_SPINLOCK(activity_lock);
+
+void activity_stats_update(void)
+{
+       int i;
+       unsigned long flags;
+       ktime_t now;
+       s64 delta;
+
+       spin_lock_irqsave(&activity_lock, flags);
+       now = ktime_get();
+       delta = ktime_to_ns(ktime_sub(now, last_transmit));
+
+       for (i = BUCKET_MAX - 1; i >= 0; i--) {
+               /*
+                * Record the transmission in the largest bucket whose
+                * minimum interval (1 << i seconds) this delta reaches;
+                * skip buckets the delta is still too small for.
+                */
+               if (delta < (1000000000ULL << i))
+                       continue;
+
+               activity_stats[i]++;
+               last_transmit = now;
+               break;
+       }
+       spin_unlock_irqrestore(&activity_lock, flags);
+}
+
+static int activity_stats_show(struct seq_file *m, void *v)
+{
+       int i;
+       int ret;
+
+       seq_printf(m, "Min Bucket(sec) Count\n");
+
+       for (i = 0; i < BUCKET_MAX; i++) {
+               ret = seq_printf(m, "%15d %lu\n", 1 << i, activity_stats[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int activity_stats_notifier(struct notifier_block *nb,
+                                       unsigned long event, void *dummy)
+{
+       switch (event) {
+               case PM_SUSPEND_PREPARE:
+                       suspend_time = ktime_get_real();
+                       break;
+
+               case PM_POST_SUSPEND:
+                       suspend_time = ktime_sub(ktime_get_real(), suspend_time);
+                       last_transmit = ktime_sub(last_transmit, suspend_time);
+       }
+
+       return 0;
+}
+
+static int activity_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, activity_stats_show, PDE_DATA(inode));
+}
+
+static const struct file_operations activity_stats_fops = {
+       .open           = activity_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static struct notifier_block activity_stats_notifier_block = {
+       .notifier_call = activity_stats_notifier,
+};
+
+static int  __init activity_stats_init(void)
+{
+       proc_create("activity", S_IRUGO,
+                   init_net.proc_net_stat, &activity_stats_fops);
+       return register_pm_notifier(&activity_stats_notifier_block);
+}
+
+subsys_initcall(activity_stats_init);
+
index 6629cdc134dc1a439a044027d6826bd6dd4b56f3..f7c36826f3f4d70129375c2935f624110e26d2d0 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
+#ifndef CONFIG_BT_SOCK_DEBUG
+#undef  BT_DBG
+#define BT_DBG(D...)
+#endif
+
 #define VERSION "2.16"
 
 /* Bluetooth sockets */
@@ -103,11 +108,40 @@ void bt_sock_unregister(int proto)
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+       return !current_euid();
+}
+
+static inline int current_has_bt(void)
+{
+       return current_has_bt_admin();
+}
+#else
+static inline int current_has_bt_admin(void)
+{
+       return 1;
+}
+
+static inline int current_has_bt(void)
+{
+       return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
                          int kern)
 {
        int err;
 
+       if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+                       proto == BTPROTO_L2CAP) {
+               if (!current_has_bt())
+                       return -EPERM;
+       } else if (!current_has_bt_admin())
+               return -EPERM;
+
        if (net != &init_net)
                return -EAFNOSUPPORT;
 
index d459ed43c779d776e453634db290d08f48d5a7c7..a3f3380c2095078ae4e348dce2618e82642947c1 100644 (file)
@@ -113,7 +113,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
        bdaddr_t *dst = mgr->l2cap_conn->dst;
        struct hci_conn *hcon;
 
-       hcon = hci_conn_add(hdev, AMP_LINK, dst);
+       hcon = hci_conn_add(hdev, AMP_LINK, 0, dst);
        if (!hcon)
                return NULL;
 
index 8e7290aea8f81f6f2fb69f97d4c5b07f0047b004..c9b2d5011fe4d12370ce70a7cbcb3ebf51dc691e 100644 (file)
@@ -354,7 +354,8 @@ static void hci_conn_auto_accept(unsigned long arg)
                     &conn->dst);
 }
 
-struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
+struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type,
+                                       __u16 pkt_type, bdaddr_t *dst)
 {
        struct hci_conn *conn;
 
@@ -382,14 +383,22 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
-               if (lmp_esco_capable(hdev))
-                       conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
-                                       (hdev->esco_type & EDR_ESCO_MASK);
-               else
-                       conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
-               break;
+               if (!pkt_type)
+                       pkt_type = SCO_ESCO_MASK;
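+               /* fall through: the eSCO path below also handles SCO links */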
        case ESCO_LINK:
-               conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
+               if (!pkt_type)
+                       pkt_type = ALL_ESCO_MASK;
+               if (lmp_esco_capable(hdev)) {
+                       /* HCI Setup Synchronous Connection Command uses
+                          reverse logic on the EDR_ESCO_MASK bits */
+                       conn->pkt_type = (pkt_type ^ EDR_ESCO_MASK) &
+                                       hdev->esco_type;
+               } else {
+                       /* Legacy HCI Add Sco Connection Command uses a
+                          shifted bitmask */
+                       conn->pkt_type = (pkt_type << 5) & hdev->pkt_type &
+                                       SCO_PTYPE_MASK;
+               }
                break;
        }
 
@@ -520,7 +529,7 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                if (le)
                        return ERR_PTR(-EBUSY);
 
-               le = hci_conn_add(hdev, LE_LINK, dst);
+               le = hci_conn_add(hdev, LE_LINK, 0, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);
 
@@ -543,7 +552,7 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
-               acl = hci_conn_add(hdev, ACL_LINK, dst);
+               acl = hci_conn_add(hdev, ACL_LINK, 0, dst);
                if (!acl)
                        return ERR_PTR(-ENOMEM);
        }
@@ -561,7 +570,8 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 }
 
 static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
-                               bdaddr_t *dst, u8 sec_level, u8 auth_type)
+                                       __u16 pkt_type, bdaddr_t *dst,
+                                       u8 sec_level, u8 auth_type)
 {
        struct hci_conn *acl;
        struct hci_conn *sco;
@@ -572,7 +582,7 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
 
        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
-               sco = hci_conn_add(hdev, type, dst);
+               sco = hci_conn_add(hdev, type, pkt_type, dst);
                if (!sco) {
                        hci_conn_drop(acl);
                        return ERR_PTR(-ENOMEM);
@@ -602,7 +612,8 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
 }
 
 /* Create SCO, ACL or LE connection. */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type,
+                            __u16 pkt_type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type)
 {
        BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
@@ -614,7 +625,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                return hci_connect_acl(hdev, dst, sec_level, auth_type);
        case SCO_LINK:
        case ESCO_LINK:
-               return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
+               return hci_connect_sco(hdev, type, pkt_type, dst, sec_level, auth_type);
        }
 
        return ERR_PTR(-EINVAL);
@@ -883,6 +894,15 @@ int hci_get_conn_list(void __user *arg)
                (ci + n)->out   = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
+               if (c->type == SCO_LINK) {
+                       (ci + n)->mtu = hdev->sco_mtu;
+                       (ci + n)->cnt = hdev->sco_cnt;
+                       (ci + n)->pkts = hdev->sco_pkts;
+               } else {
+                       (ci + n)->mtu = hdev->acl_mtu;
+                       (ci + n)->cnt = hdev->acl_cnt;
+                       (ci + n)->pkts = hdev->acl_pkts;
+               }
                if (++n >= req.conn_num)
                        break;
        }
@@ -919,6 +939,15 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
                ci.out   = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
+               if (req.type == SCO_LINK) {
+                       ci.mtu = hdev->sco_mtu;
+                       ci.cnt = hdev->sco_cnt;
+                       ci.pkts = hdev->sco_pkts;
+               } else {
+                       ci.mtu = hdev->acl_mtu;
+                       ci.cnt = hdev->acl_cnt;
+                       ci.pkts = hdev->acl_pkts;
+               }
        }
        hci_dev_unlock(hdev);
 
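With the two hunks above, HCIGETCONNLIST and HCIGETCONNINFO report the SCO buffer figures for SCO links instead of always returning the ACL values. A hypothetical userspace sketch, assuming the Android-extended struct hci_conn_info that carries the new mtu/cnt/pkts fields (the mainline struct does not have them); dd is an HCI device socket such as hci_open_dev() returns:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* Print the per-link MTU of one SCO connection (sketch). */
static int print_sco_mtu(int dd, const bdaddr_t *peer)
{
        struct hci_conn_info_req *cr;

        cr = calloc(1, sizeof(*cr) + sizeof(struct hci_conn_info));
        if (!cr)
                return -1;
        bacpy(&cr->bdaddr, peer);
        cr->type = SCO_LINK;

        if (ioctl(dd, HCIGETCONNINFO, (unsigned long)cr) < 0) {
                free(cr);
                return -1;
        }

        /* conn_info[0].mtu is the field this patch set adds. */
        printf("SCO MTU: %u\n", cr->conn_info[0].mtu);
        free(cr);
        return 0;
}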
old mode 100644 (file)
new mode 100755 (executable)
index 5daf7ab..1526fb2
@@ -1119,7 +1119,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
                }
        } else {
                if (!conn) {
-                       conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+                       conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
                        if (conn) {
                                conn->out = true;
                                conn->link_mode |= HCI_LM_MASTER;
@@ -1748,6 +1748,15 @@ unlock:
        hci_conn_check_pending(hdev);
 }
 
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+       if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+                       (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+                                                   BT_CONNECTED)))
+               return true;
+       return false;
+}
+
 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_conn_request *ev = (void *) skb->data;
@@ -1775,7 +1784,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
                                               &ev->bdaddr);
                if (!conn) {
-                       conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+                       /* pkt_type not yet used for incoming connections */
+                       conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
                        if (!conn) {
                                BT_ERR("No memory for new connection");
                                hci_dev_unlock(hdev);
@@ -1794,7 +1804,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                        bacpy(&cp.bdaddr, &ev->bdaddr);
 
-                       if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+                       if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+                                               || is_sco_active(hdev)))
                                cp.role = 0x00; /* Become master */
                        else
                                cp.role = 0x01; /* Remain slave */
@@ -2963,6 +2974,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                hci_conn_add_sysfs(conn);
                break;
 
+       case 0x10:      /* Connection Accept Timeout */
        case 0x11:      /* Unsupported Feature or Parameter Value */
        case 0x1c:      /* SCO interval rejected */
        case 0x1a:      /* Unsupported Remote Feature */
@@ -3540,7 +3552,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (!conn) {
-               conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+               conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
                if (!conn) {
                        BT_ERR("No memory for new connection");
                        goto unlock;
index 68843a28a7af6a3e52f92f0cde033e03b031560a..c11a28bae844fc80e91ad61e21ff4d64737945d2 100644 (file)
@@ -1793,10 +1793,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        auth_type = l2cap_get_auth_type(chan);
 
        if (chan->dcid == L2CAP_CID_LE_DATA)
-               hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
+               hcon = hci_connect(hdev, LE_LINK, 0, dst, dst_type,
                                   chan->sec_level, auth_type);
        else
-               hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
+               hcon = hci_connect(hdev, ACL_LINK, 0, dst, dst_type,
                                   chan->sec_level, auth_type);
 
        if (IS_ERR(hcon)) {
index 8208a13a983768d5eee65eb5c470206c6e9f67ee..3e574540b2c2fab568c07e768f03bca4a40fbb17 100644 (file)
@@ -2205,10 +2205,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
        if (cp->addr.type == BDADDR_BREDR)
-               conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
+               conn = hci_connect(hdev, ACL_LINK, 0, &cp->addr.bdaddr,
                                   cp->addr.type, sec_level, auth_type);
        else
-               conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
+               conn = hci_connect(hdev, LE_LINK, 0, &cp->addr.bdaddr,
                                   cp->addr.type, sec_level, auth_type);
 
        if (IS_ERR(conn)) {
index ca957d34b0c89fa29341a179ee16e325ac226e55..0c77476d33d2043f388da49a7fe8545418760e96 100644 (file)
@@ -436,7 +436,6 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
 
        switch (d->state) {
        case BT_CONNECT:
-       case BT_CONFIG:
                if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
                        set_bit(RFCOMM_AUTH_REJECT, &d->flags);
                        rfcomm_schedule();
index 2bb1d3a5e76b3dcc17722813e3fa1f587a8ffae1..3178c7b4a17148f30fa811d80053afb8309bdb3a 100644 (file)
@@ -158,6 +158,7 @@ static int sco_connect(struct sock *sk)
 {
        bdaddr_t *src = &bt_sk(sk)->src;
        bdaddr_t *dst = &bt_sk(sk)->dst;
+       __u16 pkt_type = sco_pi(sk)->pkt_type;
        struct sco_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev  *hdev;
@@ -173,11 +174,13 @@ static int sco_connect(struct sock *sk)
 
        if (lmp_esco_capable(hdev) && !disable_esco)
                type = ESCO_LINK;
-       else
+       else {
                type = SCO_LINK;
+               pkt_type &= SCO_ESCO_MASK;
+       }
 
-       hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
-                          HCI_AT_NO_BONDING);
+       hcon = hci_connect(hdev, type, pkt_type, dst, BDADDR_BREDR,
+                          BT_SECURITY_LOW, HCI_AT_NO_BONDING);
        if (IS_ERR(hcon)) {
                err = PTR_ERR(hcon);
                goto done;
@@ -445,17 +448,21 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
        return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
-       struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+       struct sockaddr_sco sa;
        struct sock *sk = sock->sk;
-       int err = 0;
+       int len, err = 0;
 
-       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+       BT_DBG("sk %p", sk);
 
        if (!addr || addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       memset(&sa, 0, sizeof(sa));
+       len = min_t(unsigned int, sizeof(sa), alen);
+       memcpy(&sa, addr, len);
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
@@ -468,7 +475,8 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
                goto done;
        }
 
-       bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+       bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+       sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
        sk->sk_state = BT_BOUND;
 
@@ -479,26 +487,34 @@ done:
 
 static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
 {
-       struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
        struct sock *sk = sock->sk;
-       int err;
+       struct sockaddr_sco sa;
+       int len, err;
 
        BT_DBG("sk %p", sk);
 
-       if (alen < sizeof(struct sockaddr_sco) ||
-           addr->sa_family != AF_BLUETOOTH)
+       if (!addr || addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
-       if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
-               return -EBADFD;
-
-       if (sk->sk_type != SOCK_SEQPACKET)
-               return -EINVAL;
+       memset(&sa, 0, sizeof(sa));
+       len = min_t(unsigned int, sizeof(sa), alen);
+       memcpy(&sa, addr, len);
 
        lock_sock(sk);
 
+       if (sk->sk_type != SOCK_SEQPACKET) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+               err = -EBADFD;
+               goto done;
+       }
+
        /* Set destination address and psm */
-       bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+       bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+       sco_pi(sk)->pkt_type = sa.sco_pkt_type;
 
        err = sco_connect(sk);
        if (err)
@@ -622,6 +638,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
                bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
        else
                bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+       sa->sco_pkt_type = sco_pi(sk)->pkt_type;
 
        return 0;
 }
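The net effect of the sco.c changes is a sco_pkt_type field in sockaddr_sco that survives bind(), connect() and getname(), letting userspace restrict which SCO/eSCO packet types a link may use. A hypothetical sketch, assuming the extended sockaddr_sco layout from this patch set (stock BlueZ headers do not define sco_pkt_type):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

/* Open a SCO link restricted to the packet types in pkt_type (sketch). */
static int sco_connect_pkt_type(const bdaddr_t *dst, uint16_t pkt_type)
{
        struct sockaddr_sco sa;
        int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);

        if (sk < 0)
                return -1;

        memset(&sa, 0, sizeof(sa));
        sa.sco_family = AF_BLUETOOTH;
        bacpy(&sa.sco_bdaddr, dst);
        sa.sco_pkt_type = pkt_type;     /* assumed field from this patch */

        if (connect(sk, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                close(sk);
                return -1;
        }
        return sk;
}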
index 967312803e4130f4e27712daed6d9dd9e2a1a9d6..239e0e84f9e6eef6ff5c17f8e6477d8663974aae 100644 (file)
@@ -41,11 +41,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 #endif
 
-       u64_stats_update_begin(&brstats->syncp);
-       brstats->tx_packets++;
-       brstats->tx_bytes += skb->len;
-       u64_stats_update_end(&brstats->syncp);
-
        if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
                goto out;
 
@@ -54,6 +49,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
+       u64_stats_update_begin(&brstats->syncp);
+       brstats->tx_packets++;
+       /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+       brstats->tx_bytes += skb->len;
+       u64_stats_update_end(&brstats->syncp);
+
        if (is_broadcast_ether_addr(dest))
                br_flood_deliver(br, skb);
        else if (is_multicast_ether_addr(dest)) {
index 089cb9f363871327b027d8ce9246c830256cd456..5a9af0a9b0e673898407a0b68c29c4e9c6d80df1 100644 (file)
@@ -15,6 +15,7 @@ obj-y     := route.o inetpeer.o protocol.o \
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
index c4adc319cc2e3968477bb6dabdc437c2d470ff61..f022e0e97dc55fb057560943b680b25264095ca5 100644 (file)
 #include <linux/mroute.h>
 #endif
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+       return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+       return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
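current_has_network() above gates socket creation on membership in a fixed Android group. The group IDs come from linux/android_aid.h elsewhere in this tree; a sketch of the assumed definitions:

/* Assumed from include/linux/android_aid.h in this series: fixed Android
 * group IDs consulted by the paranoid-network checks. */
#define AID_INET        KGIDT_INIT(3003)        /* may create AF_INET/AF_INET6 sockets */
#define AID_NET_RAW     KGIDT_INIT(3004)        /* may use RAW and PACKET sockets */
#define AID_NET_ADMIN   KGIDT_INIT(3005)        /* may administer network interfaces */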
@@ -284,6 +297,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
+       if (!current_has_network())
+               return -EACCES;
+
        if (unlikely(!inet_ehash_secret))
                if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                        build_ehash_secret();
@@ -336,8 +352,7 @@ lookup_protocol:
        }
 
        err = -EPERM;
-       if (sock->type == SOCK_RAW && !kern &&
-           !ns_capable(net->user_ns, CAP_NET_RAW))
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
                goto out_rcu_unlock;
 
        sock->ops = answer->ops;
@@ -905,6 +920,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case SIOCSIFPFLAGS:
        case SIOCGIFPFLAGS:
        case SIOCSIFFLAGS:
+       case SIOCKILLADDR:
                err = devinet_ioctl(net, cmd, (void __user *)arg);
                break;
        default:
index e40eef4ac6979e69ee78bcb00ea2a4377b52f945..b151e0ac7f275300ac5051b3e41e4356f830ba56 100644 (file)
@@ -59,6 +59,7 @@
 
 #include <net/arp.h>
 #include <net/ip.h>
+#include <net/tcp.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
@@ -918,6 +919,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        case SIOCSIFBRDADDR:    /* Set the broadcast address */
        case SIOCSIFDSTADDR:    /* Set the destination address */
        case SIOCSIFNETMASK:    /* Set the netmask for the interface */
+       case SIOCKILLADDR:      /* Nuke all sockets on this address */
                ret = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto out;
@@ -969,7 +971,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
        }
 
        ret = -EADDRNOTAVAIL;
-       if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+       if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+           && cmd != SIOCKILLADDR)
                goto done;
 
        switch (cmd) {
@@ -1096,6 +1099,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                        inet_insert_ifa(ifa);
                }
                break;
+       case SIOCKILLADDR:      /* Nuke all connections on this address */
+               ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
+               break;
        }
 done:
        rtnl_unlock();
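SIOCKILLADDR is the kernel half of Android's "reset connections on interface down" path: devinet_ioctl hands the address to tcp_nuke_addr(), which errors out every TCP socket bound to it. A hedged userspace sketch of the caller side, modelled on what a connectivity daemon might do; the ioctl number is an assumption taken from the companion sockios.h change, not mainline:

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SIOCKILLADDR
#define SIOCKILLADDR 0x8939     /* assumption: value used by the Android patches */
#endif

/* Nuke every TCP connection bound to addr on ifname (sketch). */
static int kill_connections(const char *ifname, const char *addr)
{
        struct ifreq ifr;
        struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
        int fd, ret;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        sin->sin_family = AF_INET;
        if (inet_pton(AF_INET, addr, &sin->sin_addr) != 1)
                return -1;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
                return -1;
        ret = ioctl(fd, SIOCKILLADDR, &ifr);    /* needs CAP_NET_ADMIN */
        close(fd);
        return ret;
}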
index ea78ef5ac35252aadd12684e2829551ebcf2efac..5af8781b65e13f49c0f3ce90bcfa695cf83756a6 100644 (file)
@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        struct sock *sk;
        struct inet_sock *inet;
        __be32 daddr, saddr;
+       u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
                return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        icmp_param->data.icmph.checksum = 0;
 
        inet->tos = ip_hdr(skb)->tos;
+       sk->sk_mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
@@ -361,6 +363,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = daddr;
        fl4.saddr = saddr;
+       fl4.flowi4_mark = mark;
        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
        fl4.flowi4_proto = IPPROTO_ICMP;
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -379,7 +382,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                                        struct flowi4 *fl4,
                                        struct sk_buff *skb_in,
                                        const struct iphdr *iph,
-                                       __be32 saddr, u8 tos,
+                                       __be32 saddr, u8 tos, u32 mark,
                                        int type, int code,
                                        struct icmp_bxm *param)
 {
@@ -391,6 +394,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->daddr = (param->replyopts.opt.opt.srr ?
                      param->replyopts.opt.opt.faddr : iph->saddr);
        fl4->saddr = saddr;
+       fl4->flowi4_mark = mark;
        fl4->flowi4_tos = RT_TOS(tos);
        fl4->flowi4_proto = IPPROTO_ICMP;
        fl4->fl4_icmp_type = type;
@@ -488,6 +492,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        struct flowi4 fl4;
        __be32 saddr;
        u8  tos;
+       u32 mark;
        struct net *net;
        struct sock *sk;
 
@@ -584,6 +589,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
                                           IPTOS_PREC_INTERNETCONTROL) :
                                          iph->tos;
+       mark = IP4_REPLY_MARK(net, skb_in->mark);
 
        if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
                goto out_unlock;
@@ -600,11 +606,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        icmp_param.skb    = skb_in;
        icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
+       sk->sk_mark = mark;
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param.replyopts.opt;
        ipc.tx_flags = 0;
 
-       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, &icmp_param);
        if (IS_ERR(rt))
                goto out_unlock;
@@ -937,7 +944,8 @@ error:
 void icmp_err(struct sk_buff *skb, u32 info)
 {
        struct iphdr *iph = (struct iphdr *)skb->data;
-       struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+       int offset = iph->ihl<<2;
+       struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);
        int type = icmp_hdr(skb)->type;
        int code = icmp_hdr(skb)->code;
        struct net *net = dev_net(skb->dev);
@@ -947,7 +955,7 @@ void icmp_err(struct sk_buff *skb, u32 info)
         * triggered by ICMP_ECHOREPLY which sent from kernel.
         */
        if (icmph->type != ICMP_ECHOREPLY) {
-               ping_err(skb, info);
+               ping_err(skb, offset, info);
                return;
        }
 
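Both reply paths above lean on IP4_REPLY_MARK, which this series introduces in net/ip.h. A sketch of the assumed definition: reflect the mark of the packet that triggered the reply only when the per-netns fwmark_reflect sysctl (added at the bottom of this diff) is enabled, otherwise keep the historical mark of zero.

/* Assumed helper from the companion net/ip.h change. */
#define IP4_REPLY_MARK(net, mark) \
        ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)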
index 6acb541c90910204f02449e7500138362da6998a..442087d371f69e637a85682378381fbd0927e812 100644 (file)
@@ -417,7 +417,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct net *net = sock_net(sk);
        int flags = inet_sk_flowi_flags(sk);
 
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
@@ -454,7 +454,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
@@ -688,6 +688,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
                newsk->sk_write_space = sk_stream_write_space;
 
+               newsk->sk_mark = inet_rsk(req)->ir_mark;
+
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
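The ir_mark consumed by these hunks is expected to be filled in when the request socket is minted, via an inet_request_mark() helper gated on the tcp_fwmark_accept sysctl added further down; a sketch of the assumed definition:

/* Assumed helper: pick the mark for a passively-opened connection.  An
 * explicitly marked listener wins; otherwise, with tcp_fwmark_accept set,
 * the child inherits the mark of the incoming SYN. */
static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
{
        if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
                return skb->mark;

        return sk->sk_mark;
}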
index 5afbbbe03b0ed14ae519c2e05a9023cd077a480a..66431e0c2432273f6eb27d5d2765d314cb9f42fb 100644 (file)
@@ -1496,7 +1496,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
                        daddr = replyopts.opt.opt.faddr;
        }
 
-       flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+       flowi4_init_output(&fl4, arg->bound_dev_if,
+                          IP4_REPLY_MARK(net, skb->mark),
                           RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
index e7916c193932222b7538c104dc71eeb8b3f8b5f2..23dfd4a312cec502abb720a188479c94937ffdbc 100644 (file)
@@ -110,6 +110,18 @@ config IP_NF_TARGET_REJECT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_REJECT_SKERR
+       bool "Force socket error when rejecting with icmp*"
+       depends on IP_NF_TARGET_REJECT
+       default n
+       help
+         This option also raises a matching error on the local socket
+         when a packet is rejected with "--reject-with icmp*".  The
+         REJECT target normally only sends an ICMP message and leaves
+         the local socket unaware of the ingress reject.
+
+         If unsure, say N.
+
 config IP_NF_TARGET_ULOG
        tristate "ULOG target support"
        default m if NETFILTER_ADVANCED=n
index 04b18c1ac3458503a2b5d79bd7fd1547c13e76d7..452e8a587c346f5f3f8ecddc6780f92f8a7e78b6 100644 (file)
@@ -129,6 +129,14 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 static inline void send_unreach(struct sk_buff *skb_in, int code)
 {
        icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP_NF_TARGET_REJECT_SKERR
+       if (skb_in->sk) {
+               skb_in->sk->sk_err = icmp_err_convert[code].errno;
+               skb_in->sk->sk_error_report(skb_in->sk);
+               pr_debug("ipt_REJECT: sk_err=%d for skb=%p sk=%p\n",
+                       skb_in->sk->sk_err, skb_in, skb_in->sk);
+       }
+#endif
 }
 
 static unsigned int
index aa857a4a06a8ade1e953509127f14aba74742b93..b61b28a2121e712243f6a2f6b02277bd6180eba9 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/netdevice.h>
 #include <net/snmp.h>
 #include <net/ip.h>
-#include <net/ipv6.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/inet_common.h>
 #include <net/checksum.h>
 
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#endif
 
-static struct ping_table ping_table;
+
+struct ping_table ping_table;
+struct pingv6_ops pingv6_ops;
+EXPORT_SYMBOL_GPL(pingv6_ops);
 
 static u16 ping_port_rover;
 
@@ -58,6 +67,7 @@ static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int ma
        pr_debug("hash(%d) = %d\n", num, res);
        return res;
 }
+EXPORT_SYMBOL_GPL(ping_hash);
 
 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
                                             struct net *net, unsigned int num)
@@ -65,7 +75,7 @@ static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
        return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
 }
 
-static int ping_v4_get_port(struct sock *sk, unsigned short ident)
+int ping_get_port(struct sock *sk, unsigned short ident)
 {
        struct hlist_nulls_node *node;
        struct hlist_nulls_head *hlist;
@@ -103,6 +113,10 @@ next_port:
                ping_portaddr_for_each_entry(sk2, node, hlist) {
                        isk2 = inet_sk(sk2);
 
+                       /* BUG? Why is this reuse and not reuseaddr? ping.c
+                        * doesn't turn off SO_REUSEADDR, and it doesn't expect
+                        * that other ping processes can steal its packets.
+                        */
                        if ((isk2->inet_num == ident) &&
                            (sk2 != sk) &&
                            (!sk2->sk_reuse || !sk->sk_reuse))
@@ -125,17 +139,18 @@ fail:
        write_unlock_bh(&ping_table.lock);
        return 1;
 }
+EXPORT_SYMBOL_GPL(ping_get_port);
 
-static void ping_v4_hash(struct sock *sk)
+void ping_hash(struct sock *sk)
 {
-       pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
+       pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
        BUG(); /* "Please do not press this button again." */
 }
 
-static void ping_v4_unhash(struct sock *sk)
+void ping_unhash(struct sock *sk)
 {
        struct inet_sock *isk = inet_sk(sk);
-       pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+       pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
        if (sk_hashed(sk)) {
                write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
@@ -146,31 +161,61 @@ static void ping_v4_unhash(struct sock *sk)
                write_unlock_bh(&ping_table.lock);
        }
 }
+EXPORT_SYMBOL_GPL(ping_unhash);
 
-static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
-                                  u16 ident, int dif)
+static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 {
        struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
        struct sock *sk = NULL;
        struct inet_sock *isk;
        struct hlist_nulls_node *hnode;
+       int dif = skb->dev->ifindex;
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+                        (int)ident, &ip_hdr(skb)->daddr, dif);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
+                        (int)ident, &ipv6_hdr(skb)->daddr, dif);
+#endif
+       }
 
-       pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
-                (int)ident, &daddr, dif);
        read_lock_bh(&ping_table.lock);
 
        ping_portaddr_for_each_entry(sk, hnode, hslot) {
                isk = inet_sk(sk);
 
-               pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
-                        (int)isk->inet_num, &isk->inet_rcv_saddr,
-                        sk->sk_bound_dev_if);
-
                pr_debug("iterate\n");
                if (isk->inet_num != ident)
                        continue;
-               if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
-                       continue;
+
+               if (skb->protocol == htons(ETH_P_IP) &&
+                   sk->sk_family == AF_INET) {
+                       pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
+                                (int) isk->inet_num, &isk->inet_rcv_saddr,
+                                sk->sk_bound_dev_if);
+
+                       if (isk->inet_rcv_saddr &&
+                           isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
+                               continue;
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (skb->protocol == htons(ETH_P_IPV6) &&
+                          sk->sk_family == AF_INET6) {
+                       struct ipv6_pinfo *np = inet6_sk(sk);
+
+                       pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
+                                (int) isk->inet_num,
+                                &inet6_sk(sk)->rcv_saddr,
+                                sk->sk_bound_dev_if);
+
+                       if (!ipv6_addr_any(&np->rcv_saddr) &&
+                           !ipv6_addr_equal(&np->rcv_saddr,
+                                            &ipv6_hdr(skb)->daddr))
+                               continue;
+#endif
+               }
+
                if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
                        continue;
 
@@ -200,7 +245,7 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
 }
 
 
-static int ping_init_sock(struct sock *sk)
+int ping_init_sock(struct sock *sk)
 {
        struct net *net = sock_net(sk);
        kgid_t group = current_egid();
@@ -232,8 +277,9 @@ out_release_group:
        put_group_info(group_info);
        return ret;
 }
+EXPORT_SYMBOL_GPL(ping_init_sock);
 
-static void ping_close(struct sock *sk, long timeout)
+void ping_close(struct sock *sk, long timeout)
 {
        pr_debug("ping_close(sk=%p,sk->num=%u)\n",
                 inet_sk(sk), inet_sk(sk)->inet_num);
@@ -241,36 +287,122 @@ static void ping_close(struct sock *sk, long timeout)
 
        sk_common_release(sk);
 }
+EXPORT_SYMBOL_GPL(ping_close);
+
+/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
+int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+                        struct sockaddr *uaddr, int addr_len) {
+       struct net *net = sock_net(sk);
+       if (sk->sk_family == AF_INET) {
+               struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+               int chk_addr_ret;
+
+               if (addr_len < sizeof(*addr))
+                       return -EINVAL;
+
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+                        sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+
+               chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
 
+               if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
+                       chk_addr_ret = RTN_LOCAL;
+
+               if ((sysctl_ip_nonlocal_bind == 0 &&
+                   isk->freebind == 0 && isk->transparent == 0 &&
+                    chk_addr_ret != RTN_LOCAL) ||
+                   chk_addr_ret == RTN_MULTICAST ||
+                   chk_addr_ret == RTN_BROADCAST)
+                       return -EADDRNOTAVAIL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (sk->sk_family == AF_INET6) {
+               struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
+               int addr_type, scoped, has_addr;
+               struct net_device *dev = NULL;
+
+               if (addr_len < sizeof(*addr))
+                       return -EINVAL;
+
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+                        sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+
+               addr_type = ipv6_addr_type(&addr->sin6_addr);
+               scoped = __ipv6_addr_needs_scope_id(addr_type);
+               if ((addr_type != IPV6_ADDR_ANY &&
+                    !(addr_type & IPV6_ADDR_UNICAST)) ||
+                   (scoped && !addr->sin6_scope_id))
+                       return -EINVAL;
+
+               rcu_read_lock();
+               if (addr->sin6_scope_id) {
+                       dev = dev_get_by_index_rcu(net, addr->sin6_scope_id);
+                       if (!dev) {
+                               rcu_read_unlock();
+                               return -ENODEV;
+                       }
+               }
+               has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
+                                                   scoped);
+               rcu_read_unlock();
+
+               if (!(isk->freebind || isk->transparent || has_addr ||
+                     addr_type == IPV6_ADDR_ANY))
+                       return -EADDRNOTAVAIL;
+
+               if (scoped)
+                       sk->sk_bound_dev_if = addr->sin6_scope_id;
+#endif
+       } else {
+               return -EAFNOSUPPORT;
+       }
+       return 0;
+}
+
+void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
+{
+       if (saddr->sa_family == AF_INET) {
+               struct inet_sock *isk = inet_sk(sk);
+               struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
+               isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (saddr->sa_family == AF_INET6) {
+               struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               np->rcv_saddr = np->saddr = addr->sin6_addr;
+#endif
+       }
+}
+
+void ping_clear_saddr(struct sock *sk, int dif)
+{
+       sk->sk_bound_dev_if = dif;
+       if (sk->sk_family == AF_INET) {
+               struct inet_sock *isk = inet_sk(sk);
+               isk->inet_rcv_saddr = isk->inet_saddr = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (sk->sk_family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&np->saddr, 0, sizeof(np->saddr));
+#endif
+       }
+}
 /*
  * We need our own bind because there are no privileged id's == local ports.
  * Moreover, we don't allow binding to multi- and broadcast addresses.
  */
 
-static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-       struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
        struct inet_sock *isk = inet_sk(sk);
        unsigned short snum;
-       int chk_addr_ret;
        int err;
+       int dif = sk->sk_bound_dev_if;
 
-       if (addr_len < sizeof(struct sockaddr_in))
-               return -EINVAL;
-
-       pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
-                sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
-
-       chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
-       if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
-               chk_addr_ret = RTN_LOCAL;
-
-       if ((sysctl_ip_nonlocal_bind == 0 &&
-           isk->freebind == 0 && isk->transparent == 0 &&
-            chk_addr_ret != RTN_LOCAL) ||
-           chk_addr_ret == RTN_MULTICAST ||
-           chk_addr_ret == RTN_BROADCAST)
-               return -EADDRNOTAVAIL;
+       err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
+       if (err)
+               return err;
 
        lock_sock(sk);
 
@@ -279,42 +411,50 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto out;
 
        err = -EADDRINUSE;
-       isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
-       snum = ntohs(addr->sin_port);
-       if (ping_v4_get_port(sk, snum) != 0) {
-               isk->inet_saddr = isk->inet_rcv_saddr = 0;
+       ping_set_saddr(sk, uaddr);
+       snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
+       if (ping_get_port(sk, snum) != 0) {
+               ping_clear_saddr(sk, dif);
                goto out;
        }
 
-       pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
+       pr_debug("after bind(): num = %d, dif = %d\n",
                 (int)isk->inet_num,
-                &isk->inet_rcv_saddr,
                 (int)sk->sk_bound_dev_if);
 
        err = 0;
-       if (isk->inet_rcv_saddr)
+       if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
+           (sk->sk_family == AF_INET6 &&
+            !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
        isk->inet_sport = htons(isk->inet_num);
        isk->inet_daddr = 0;
        isk->inet_dport = 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+#endif
+
        sk_dst_reset(sk);
 out:
        release_sock(sk);
        pr_debug("ping_v4_bind -> %d\n", err);
        return err;
 }
+EXPORT_SYMBOL_GPL(ping_bind);
 
 /*
  * Is this a supported type of ICMP message?
  */
 
-static inline int ping_supported(int type, int code)
+static inline int ping_supported(int family, int type, int code)
 {
-       if (type == ICMP_ECHO && code == 0)
-               return 1;
-       return 0;
+       return (family == AF_INET && type == ICMP_ECHO && code == 0) ||
+              (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0);
 }
 
 /*
@@ -322,30 +462,42 @@ static inline int ping_supported(int type, int code)
  * sort of error condition.
  */
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-
-void ping_err(struct sk_buff *skb, u32 info)
+void ping_err(struct sk_buff *skb, int offset, u32 info)
 {
-       struct iphdr *iph = (struct iphdr *)skb->data;
-       struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+       int family;
+       struct icmphdr *icmph;
        struct inet_sock *inet_sock;
-       int type = icmp_hdr(skb)->type;
-       int code = icmp_hdr(skb)->code;
+       int type;
+       int code;
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        int harderr;
        int err;
 
+       if (skb->protocol == htons(ETH_P_IP)) {
+               family = AF_INET;
+               type = icmp_hdr(skb)->type;
+               code = icmp_hdr(skb)->code;
+               icmph = (struct icmphdr *)(skb->data + offset);
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               family = AF_INET6;
+               type = icmp6_hdr(skb)->icmp6_type;
+               code = icmp6_hdr(skb)->icmp6_code;
+               icmph = (struct icmphdr *) (skb->data + offset);
+       } else {
+               BUG();
+       }
+
        /* We assume the packet has already been checked by icmp_unreach */
 
-       if (!ping_supported(icmph->type, icmph->code))
+       if (!ping_supported(family, icmph->type, icmph->code))
                return;
 
-       pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
-                code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
+       pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n",
+                skb->protocol, type, code, ntohs(icmph->un.echo.id),
+                ntohs(icmph->un.echo.sequence));
 
-       sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
-                           ntohs(icmph->un.echo.id), skb->dev->ifindex);
+       sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk == NULL) {
                pr_debug("no socket, dropping\n");
                return; /* No socket for error */
@@ -356,72 +508,83 @@ void ping_err(struct sk_buff *skb, u32 info)
        harderr = 0;
        inet_sock = inet_sk(sk);
 
-       switch (type) {
-       default:
-       case ICMP_TIME_EXCEEDED:
-               err = EHOSTUNREACH;
-               break;
-       case ICMP_SOURCE_QUENCH:
-               /* This is not a real error but ping wants to see it.
-                * Report it with some fake errno. */
-               err = EREMOTEIO;
-               break;
-       case ICMP_PARAMETERPROB:
-               err = EPROTO;
-               harderr = 1;
-               break;
-       case ICMP_DEST_UNREACH:
-               if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
-                       ipv4_sk_update_pmtu(skb, sk, info);
-                       if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
-                               err = EMSGSIZE;
-                               harderr = 1;
-                               break;
+       if (skb->protocol == htons(ETH_P_IP)) {
+               switch (type) {
+               default:
+               case ICMP_TIME_EXCEEDED:
+                       err = EHOSTUNREACH;
+                       break;
+               case ICMP_SOURCE_QUENCH:
+                       /* This is not a real error but ping wants to see it.
+                        * Report it with some fake errno.
+                        */
+                       err = EREMOTEIO;
+                       break;
+               case ICMP_PARAMETERPROB:
+                       err = EPROTO;
+                       harderr = 1;
+                       break;
+               case ICMP_DEST_UNREACH:
+                       if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+                               ipv4_sk_update_pmtu(skb, sk, info);
+                               if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
+                                       err = EMSGSIZE;
+                                       harderr = 1;
+                                       break;
+                               }
+                               goto out;
                        }
-                       goto out;
-               }
-               err = EHOSTUNREACH;
-               if (code <= NR_ICMP_UNREACH) {
-                       harderr = icmp_err_convert[code].fatal;
-                       err = icmp_err_convert[code].errno;
+                       err = EHOSTUNREACH;
+                       if (code <= NR_ICMP_UNREACH) {
+                               harderr = icmp_err_convert[code].fatal;
+                               err = icmp_err_convert[code].errno;
+                       }
+                       break;
+               case ICMP_REDIRECT:
+                       /* See ICMP_SOURCE_QUENCH */
+                       ipv4_sk_redirect(skb, sk);
+                       err = EREMOTEIO;
+                       break;
                }
-               break;
-       case ICMP_REDIRECT:
-               /* See ICMP_SOURCE_QUENCH */
-               ipv4_sk_redirect(skb, sk);
-               err = EREMOTEIO;
-               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
+#endif
        }
 
        /*
         *      RFC1122: OK.  Passes ICMP errors back to application, as per
         *      4.1.3.3.
         */
-       if (!inet_sock->recverr) {
+       if ((family == AF_INET && !inet_sock->recverr) ||
+           (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else {
-               ip_icmp_error(sk, skb, err, 0 /* no remote port */,
-                        info, (u8 *)icmph);
+               if (family == AF_INET) {
+                       ip_icmp_error(sk, skb, err, 0 /* no remote port */,
+                                     info, (u8 *)icmph);
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (family == AF_INET6) {
+                       pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
+                                                  info, (u8 *)icmph);
+#endif
+               }
        }
        sk->sk_err = err;
        sk->sk_error_report(sk);
 out:
        sock_put(sk);
 }
+EXPORT_SYMBOL_GPL(ping_err);
 
 /*
- *     Copy and checksum an ICMP Echo packet from user space into a buffer.
+ *     Copy and checksum an ICMP Echo packet from user space into a buffer
+ *     starting from the payload.
  */
 
-struct pingfakehdr {
-       struct icmphdr icmph;
-       struct iovec *iov;
-       __wsum wcheck;
-};
-
-static int ping_getfrag(void *from, char *to,
-                       int offset, int fraglen, int odd, struct sk_buff *skb)
+int ping_getfrag(void *from, char *to,
+                int offset, int fraglen, int odd, struct sk_buff *skb)
 {
        struct pingfakehdr *pfh = (struct pingfakehdr *)from;
 
@@ -432,20 +595,33 @@ static int ping_getfrag(void *from, char *to,
                            pfh->iov, 0, fraglen - sizeof(struct icmphdr),
                            &pfh->wcheck))
                        return -EFAULT;
+       } else if (offset < sizeof(struct icmphdr)) {
+               BUG();
+       } else {
+               if (csum_partial_copy_fromiovecend
+                               (to, pfh->iov, offset - sizeof(struct icmphdr),
+                                fraglen, &pfh->wcheck))
+                       return -EFAULT;
+       }
 
-               return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+       /* For IPv6, checksum each skb as we go along, as expected by
+        * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in
+        * wcheck; it is finalized in ping_v4_push_pending_frames.
+        */
+       if (pfh->family == AF_INET6) {
+               skb->csum = pfh->wcheck;
+               skb->ip_summed = CHECKSUM_NONE;
+               pfh->wcheck = 0;
        }
-       if (offset < sizeof(struct icmphdr))
-               BUG();
-       if (csum_partial_copy_fromiovecend
-                       (to, pfh->iov, offset - sizeof(struct icmphdr),
-                        fraglen, &pfh->wcheck))
-               return -EFAULT;
+#endif
+
        return 0;
 }
+EXPORT_SYMBOL_GPL(ping_getfrag);
 
-static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
-                                   struct flowi4 *fl4)
+static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+                                      struct flowi4 *fl4)
 {
        struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
@@ -457,24 +633,9 @@ static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
        return ip_push_pending_frames(sk, fl4);
 }
 
-static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                       size_t len)
-{
-       struct net *net = sock_net(sk);
-       struct flowi4 fl4;
-       struct inet_sock *inet = inet_sk(sk);
-       struct ipcm_cookie ipc;
-       struct icmphdr user_icmph;
-       struct pingfakehdr pfh;
-       struct rtable *rt = NULL;
-       struct ip_options_data opt_copy;
-       int free = 0;
-       __be32 saddr, daddr, faddr;
-       u8  tos;
-       int err;
-
-       pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
-
+int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+                       void *user_icmph, size_t icmph_len) {
+       u8 type, code;
 
        if (len > 0xFFFF)
                return -EMSGSIZE;
@@ -489,15 +650,53 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        /*
         *      Fetch the ICMP header provided by the userland.
-        *      iovec is modified!
+        *      iovec is modified! The ICMP header is consumed.
         */
-
-       if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
-                            sizeof(struct icmphdr)))
+       if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len))
                return -EFAULT;
-       if (!ping_supported(user_icmph.type, user_icmph.code))
+
+       if (family == AF_INET) {
+               type = ((struct icmphdr *) user_icmph)->type;
+               code = ((struct icmphdr *) user_icmph)->code;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (family == AF_INET6) {
+               type = ((struct icmp6hdr *) user_icmph)->icmp6_type;
+               code = ((struct icmp6hdr *) user_icmph)->icmp6_code;
+#endif
+       } else {
+               BUG();
+       }
+
+       if (!ping_supported(family, type, code))
                return -EINVAL;
 
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ping_common_sendmsg);
+
+int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                   size_t len)
+{
+       struct net *net = sock_net(sk);
+       struct flowi4 fl4;
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipcm_cookie ipc;
+       struct icmphdr user_icmph;
+       struct pingfakehdr pfh;
+       struct rtable *rt = NULL;
+       struct ip_options_data opt_copy;
+       int free = 0;
+       __be32 saddr, daddr, faddr;
+       u8  tos;
+       int err;
+
+       pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+       err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph,
+                                 sizeof(user_icmph));
+       if (err)
+               return err;
+
        /*
         *      Get and verify the address.
         */
@@ -602,13 +801,14 @@ back_from_confirm:
        pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
        pfh.iov = msg->msg_iov;
        pfh.wcheck = 0;
+       pfh.family = AF_INET;
 
        err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
                        0, &ipc, &rt, msg->msg_flags);
        if (err)
                ip_flush_pending_frames(sk);
        else
-               err = ping_push_pending_frames(sk, &pfh, &fl4);
+               err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
        release_sock(sk);
 
 out:
@@ -629,10 +829,13 @@ do_confirm:
        goto out;
 }
 
-static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                       size_t len, int noblock, int flags, int *addr_len)
+int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                size_t len, int noblock, int flags, int *addr_len)
 {
        struct inet_sock *isk = inet_sk(sk);
+       int family = sk->sk_family;
+       struct sockaddr_in *sin;
+       struct sockaddr_in6 *sin6;
        struct sk_buff *skb;
        int copied, err;
 
@@ -642,8 +845,22 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (flags & MSG_OOB)
                goto out;
 
-       if (flags & MSG_ERRQUEUE)
-               return ip_recv_error(sk, msg, len, addr_len);
+       if (addr_len) {
+               if (family == AF_INET)
+                       *addr_len = sizeof(*sin);
+               else if (family == AF_INET6)
+                       *addr_len = sizeof(*sin6);
+       }
+
+       if (flags & MSG_ERRQUEUE) {
+               if (family == AF_INET) {
+                       return ip_recv_error(sk, msg, len, addr_len);
+#if IS_ENABLED(CONFIG_IPV6)
+               } else if (family == AF_INET6) {
+                       return pingv6_ops.ipv6_recv_error(sk, msg, len);
+#endif
+               }
+       }
 
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
@@ -662,18 +879,44 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        sock_recv_timestamp(msg, sk, skb);
 
-       /* Copy the address. */
-       if (msg->msg_name) {
-               struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+       /* Copy the address and add cmsg data. */
+       if (family == AF_INET) {
+               sin = (struct sockaddr_in *) msg->msg_name;
+               if (sin) {
+                       sin->sin_family = AF_INET;
+                       sin->sin_port = 0 /* skb->h.uh->source */;
+                       sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+                       memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+               }
+
+               if (isk->cmsg_flags)
+                       ip_cmsg_recv(msg, skb);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               struct ipv6hdr *ip6 = ipv6_hdr(skb);
+               sin6 = (struct sockaddr_in6 *) msg->msg_name;
+
+               if (sin6) {
+                       sin6->sin6_family = AF_INET6;
+                       sin6->sin6_port = 0;
+                       sin6->sin6_addr = ip6->saddr;
+                       sin6->sin6_flowinfo = 0;
+                       if (np->sndflow)
+                               sin6->sin6_flowinfo = ip6_flowinfo(ip6);
+                       sin6->sin6_scope_id =
+                               ipv6_iface_scope_id(&sin6->sin6_addr,
+                                                   IP6CB(skb)->iif);
+               }
 
-               sin->sin_family = AF_INET;
-               sin->sin_port = 0 /* skb->h.uh->source */;
-               sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
-               memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
-               *addr_len = sizeof(*sin);
+               if (inet6_sk(sk)->rxopt.all)
+                       pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
+#endif
+       } else {
+               BUG();
        }
-       if (isk->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+
        err = copied;
 
 done:
@@ -682,8 +925,9 @@ out:
        pr_debug("ping_recvmsg -> %d\n", err);
        return err;
 }
+EXPORT_SYMBOL_GPL(ping_recvmsg);
 
-static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
                 inet_sk(sk), inet_sk(sk)->inet_num, skb);
@@ -694,6 +938,7 @@ static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
 
 
 /*
@@ -704,10 +949,7 @@ void ping_rcv(struct sk_buff *skb)
 {
        struct sock *sk;
        struct net *net = dev_net(skb->dev);
-       struct iphdr *iph = ip_hdr(skb);
        struct icmphdr *icmph = icmp_hdr(skb);
-       __be32 saddr = iph->saddr;
-       __be32 daddr = iph->daddr;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
@@ -717,8 +959,7 @@ void ping_rcv(struct sk_buff *skb)
        /* Push ICMP header back */
        skb_push(skb, skb->data - (u8 *)icmph);
 
-       sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
-                           skb->dev->ifindex);
+       sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk != NULL) {
                pr_debug("rcv on socket %p\n", sk);
                ping_queue_rcv_skb(sk, skb_get(skb));
@@ -729,6 +970,7 @@ void ping_rcv(struct sk_buff *skb)
 
        /* We're called from icmp_rcv(). kfree_skb() is done there. */
 }
+EXPORT_SYMBOL_GPL(ping_rcv);
 
 struct proto ping_prot = {
        .name =         "PING",
@@ -739,14 +981,14 @@ struct proto ping_prot = {
        .disconnect =   udp_disconnect,
        .setsockopt =   ip_setsockopt,
        .getsockopt =   ip_getsockopt,
-       .sendmsg =      ping_sendmsg,
+       .sendmsg =      ping_v4_sendmsg,
        .recvmsg =      ping_recvmsg,
        .bind =         ping_bind,
        .backlog_rcv =  ping_queue_rcv_skb,
        .release_cb =   ip4_datagram_release_cb,
-       .hash =         ping_v4_hash,
-       .unhash =       ping_v4_unhash,
-       .get_port =     ping_v4_get_port,
+       .hash =         ping_hash,
+       .unhash =       ping_unhash,
+       .get_port =     ping_get_port,
        .obj_size =     sizeof(struct inet_sock),
 };
 EXPORT_SYMBOL(ping_prot);
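The renames above turn ping.c into shared infrastructure: every helper an IPv6 ping socket also needs is exported, and the protocol-specific bits are reached through the pingv6_ops table so the IPv4 core never links against the ipv6 module directly. A sketch of how the IPv6 side (net/ipv6/ping.c in this series) presumably populates the table at init time; pingv6_protosw is assumed:

/* Sketch: the ipv6 ping module fills the shared ops table on load, so
 * the calls through pingv6_ops above only fire once it is present. */
static int __init pingv6_init(void)
{
        pingv6_ops.ipv6_recv_error = ipv6_recv_error;
        pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
        pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
        pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
        pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
        return inet6_register_protosw(&pingv6_protosw);
}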
index d4d162eac4df095821a20191e41904e575d6e533..43ef33d5b9149a3b79dd1f6c98e6c133aee9bd24 100644 (file)
@@ -971,6 +971,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
        struct flowi4 fl4;
        struct rtable *rt;
 
+       if (!mark)
+               mark = IP4_REPLY_MARK(net, skb->mark);
+
        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
@@ -988,6 +991,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        struct rtable *rt;
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+       if (!fl4.flowi4_mark)
+               fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
index b05c96e7af8b810a62bb07d95436eea07c651008..5abb45e281bea0ffdcfd14d9e017742d7b19c71a 100644 (file)
@@ -312,6 +312,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->rmt_port          = th->source;
        ireq->loc_addr          = ip_hdr(skb)->daddr;
        ireq->rmt_addr          = ip_hdr(skb)->saddr;
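+       /* With tcp_fwmark_accept, unmarked listeners take the mark of the
+        * incoming SYN so the whole connection inherits it.
+        */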
+       ireq->ir_mark           = inet_request_mark(sk, skb);
        ireq->ecn_ok            = ecn_ok;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -348,7 +349,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
index 90b26beb84d41c614767d77188d0f3426e9a9658..2c707a91e04c32bf47f5362fe7f10b9427f32d6a 100644 (file)
@@ -138,6 +138,21 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
        return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int proc_tcp_default_init_rwnd(ctl_table *ctl, int write,
+                                     void __user *buffer,
+                                     size_t *lenp, loff_t *ppos)
+{
+       int old_value = *(int *)ctl->data;
+       int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
+
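+       /* Out-of-range writes (accepted range is [3, 100]) are silently
+        * reverted to the previous value.
+        */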
+       if (write && ret == 0 && (new_value < 3 || new_value > 100))
+               *(int *)ctl->data = old_value;
+
+       return ret;
+}
+
 static int proc_tcp_congestion_control(ctl_table *ctl, int write,
                                       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -737,7 +752,7 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-        {
+       {
                .procname       = "tcp_thin_dupack",
                .data           = &sysctl_tcp_thin_dupack,
                .maxlen         = sizeof(int),
@@ -762,6 +777,13 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &zero,
                .extra2         = &gso_max_segs,
        },
+       {
+               .procname       = "tcp_default_init_rwnd",
+               .data           = &sysctl_tcp_default_init_rwnd,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_tcp_default_init_rwnd
+       },
        {
                .procname       = "udp_mem",
                .data           = &sysctl_udp_mem,
@@ -851,6 +873,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = ipv4_tcp_mem,
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv4.sysctl_fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "tcp_fwmark_accept",
+               .data           = &init_net.ipv4.sysctl_tcp_fwmark_accept,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644 (file)
index 0000000..0cbbf10
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
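+/*
+ * Expands to a show/store handler pair for one int-valued knob (rejecting
+ * negative or non-numeric input) plus the matching kobj_attribute.
+ */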
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+                           struct kobj_attribute *attr, char *buf) \
+{ \
+       return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+                            struct kobj_attribute *attr, \
+                            const char *buf, size_t count) \
+{ \
+       int val, ret; \
+       ret = sscanf(buf, "%d", &val); \
+       if (ret != 1) \
+               return -EINVAL; \
+       if (val < 0) \
+               return -EINVAL; \
+       _var = val; \
+       return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+       __ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+       &tcp_wmem_min_attr.attr,
+       &tcp_wmem_def_attr.attr,
+       &tcp_wmem_max_attr.attr,
+       &tcp_rmem_min_attr.attr,
+       &tcp_rmem_def_attr.attr,
+       &tcp_rmem_max_attr.attr,
+       NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+       .attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+       struct kobject *ipv4_kobject;
+       int ret;
+
+       ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+       if (!ipv4_kobject)
+               return -ENOMEM;
+
+       ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+       if (ret) {
+               kobject_put(ipv4_kobject);
+               return ret;
+       }
+
+       return 0;
+}
+
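+/* The files appear under /sys/kernel/ipv4/, e.g. (value illustrative):
+ *   echo 262144 > /sys/kernel/ipv4/tcp_wmem_max
+ */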
+subsys_initcall(sysfs_ipv4_init);
index 5d4bd6ca3ab1e46cb02322dfecc563ac053484d9..4619ae6c5d50c393a99eb4a533e90a9f3b3f781c 100644 (file)
 #include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/uid_stat.h>
 
 #include <net/icmp.h>
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
 #include <net/netdma.h>
 #include <net/sock.h>
 
@@ -1240,6 +1244,9 @@ out:
                tcp_push(sk, flags, mss_now, tp->nonagle);
 out_nopush:
        release_sock(sk);
+
+       if (copied + copied_syn)
+               uid_stat_tcp_snd(current_uid(), copied + copied_syn);
        return copied + copied_syn;
 
 do_fault:
@@ -1544,6 +1551,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        if (copied > 0) {
                tcp_recv_skb(sk, seq, &offset);
                tcp_cleanup_rbuf(sk, copied);
+               uid_stat_tcp_rcv(current_uid(), copied);
        }
        return copied;
 }
@@ -1948,6 +1956,9 @@ skip_copy:
        tcp_cleanup_rbuf(sk, copied);
 
        release_sock(sk);
+
+       if (copied > 0)
+               uid_stat_tcp_rcv(current_uid(), copied);
        return copied;
 
 out:
@@ -1956,6 +1967,8 @@ out:
 
 recv_urg:
        err = tcp_recv_urg(sk, msg, len, flags);
+       if (err > 0)
+               uid_stat_tcp_rcv(current_uid(), err);
        goto out;
 
 recv_sndq:
@@ -3468,3 +3481,107 @@ void __init tcp_init(void)
 
        tcp_tasklet_init();
 }
+
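+/* Returns true if the route for the address goes over loopback, i.e. the
+ * address is local to this host.
+ */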
+static int tcp_is_local(struct net *net, __be32 addr)
+{
+       struct rtable *rt;
+       struct flowi4 fl4 = { .daddr = addr };
+       int is_local;
+
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR_OR_NULL(rt))
+               return 0;
+
+       is_local = rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
+       ip_rt_put(rt);
+       return is_local;
+}
+
+#if defined(CONFIG_IPV6)
+static int tcp_is_local6(struct net *net, struct in6_addr *addr)
+{
+       struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
+       int is_local = rt6 && rt6->dst.dev &&
+                      (rt6->dst.dev->flags & IFF_LOOPBACK);
+
+       if (rt6)
+               ip6_rt_put(rt6);
+       return is_local;
+}
+#endif
+
+/*
+ * tcp_nuke_addr - destroy all sockets on the given local address
+ * If the local address is the unspecified address (0.0.0.0 or ::), destroy
+ * all sockets with local addresses that are not configured.
+ */
+int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
+{
+       int family = addr->sa_family;
+       unsigned int bucket;
+
+       struct in_addr *in;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       struct in6_addr *in6;
+#endif
+       if (family == AF_INET) {
+               in = &((struct sockaddr_in *)addr)->sin_addr;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       } else if (family == AF_INET6) {
+               in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
+#endif
+       } else {
+               return -EAFNOSUPPORT;
+       }
+
+       for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
+               struct hlist_nulls_node *node;
+               struct sock *sk;
+               spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
+
+restart:
+               spin_lock_bh(lock);
+               sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
+                               continue;
+                       if (sock_flag(sk, SOCK_DEAD))
+                               continue;
+
+                       if (family == AF_INET) {
+                               __be32 s4 = inet->inet_rcv_saddr;
+                               if (s4 == LOOPBACK4_IPV6)
+                                       continue;
+
+                               if (in->s_addr != s4 &&
+                                   !(in->s_addr == INADDR_ANY &&
+                                     !tcp_is_local(net, s4)))
+                                       continue;
+                       }
+
+#if defined(CONFIG_IPV6)
+                       if (family == AF_INET6) {
+                               struct in6_addr *s6;
+                               if (!inet->pinet6)
+                                       continue;
+
+                               s6 = &inet->pinet6->rcv_saddr;
+                               if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
+                                       continue;
+
+                               if (!ipv6_addr_equal(in6, s6) &&
+                                   !(ipv6_addr_equal(in6, &in6addr_any) &&
+                                     !tcp_is_local6(net, s6)))
+                                       continue;
+                       }
+#endif
+
+                       sock_hold(sk);
+                       spin_unlock_bh(lock);
+
+                       local_bh_disable();
+                       bh_lock_sock(sk);
+                       sk->sk_err = ETIMEDOUT;
+                       sk->sk_error_report(sk);
+
+                       tcp_done(sk);
+                       bh_unlock_sock(sk);
+                       local_bh_enable();
+                       sock_put(sk);
+
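+                       /* The bucket lock was dropped while killing the
+                        * socket, so the chain may have changed; rescan
+                        * this bucket from the start.
+                        */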
+                       goto restart;
+               }
+               spin_unlock_bh(lock);
+       }
+
+       return 0;
+}
index ea7f52f3062d80241572e65816e17a3567982cb1..7aa7faa7c3dde7911c3b10ed4760aa93d7e7a4a4 100644 (file)
@@ -98,6 +98,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND;
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
 #define FLAG_WIN_UPDATE                0x02 /* Incoming ACK was a window update.       */
@@ -351,14 +352,14 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 static void tcp_fixup_rcvbuf(struct sock *sk)
 {
        u32 mss = tcp_sk(sk)->advmss;
-       u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+       u32 icwnd = sysctl_tcp_default_init_rwnd;
        int rcvmem;
 
        /* Limit to 10 segments if mss <= 1460,
         * or 14600/mss segments, with a minimum of two segments.
         */
        if (mss > 1460)
-               icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+               icwnd = max_t(u32, (1460 * icwnd) / mss, 2);
 
        rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
        while (tcp_win_from_space(rcvmem) < mss)
index 5d87806d3adebedb2f68f94a407627222c387529..1d62d94ac7e1b8963e5b5b5788c020fbc7ce371e 100644 (file)
@@ -1527,6 +1527,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
+       ireq->ir_mark = inet_request_mark(sk, skb);
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
index 56e29f0e230e4a6c06a24d7a5ee08856a644f697..3cff1f23beb76bf695f2b06c4a5138949e988e64 100644 (file)
@@ -231,14 +231,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
        }
 
        /* Set initial window to a value enough for senders starting with
-        * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
+        * initial congestion window of sysctl_tcp_default_init_rwnd. Place
         * a limit on the initial window when mss is larger than 1460.
         */
        if (mss > (1 << *rcv_wscale)) {
-               int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
+               int init_cwnd = sysctl_tcp_default_init_rwnd;
                if (mss > 1460)
-                       init_cwnd =
-                       max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+                       init_cwnd = max_t(u32, (1460 * init_cwnd) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
index 9af088d2cdaab1f4489f092f9a473d70b5c49667..470a9c008e9b9e921045bb5b11b77569e53ac5c9 100644 (file)
@@ -7,7 +7,7 @@ obj-$(CONFIG_IPV6) += ipv6.o
 ipv6-objs :=   af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
                addrlabel.o \
                route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
-               raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
+               raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
                exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o
 
 ipv6-offload :=        ip6_offload.o tcpv6_offload.o udp_offload.o exthdrs_offload.o
index 7bcdd0df68dbad627c6578b7efd83705e3abfc1c..263bd9ec652d0905c4f6eefb0bb59c4add8738b1 100644 (file)
@@ -198,6 +198,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+       .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@ -232,6 +233,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+       .accept_ra_rt_table     = 0,
        .proxy_ndp              = 0,
        .accept_source_route    = 0,    /* we do not accept RH0 by default. */
        .disable_ipv6           = 0,
@@ -1946,6 +1948,31 @@ static void  __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmp
 }
 #endif
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+       /* Determines into what table to put autoconf PIO/RIO/default routes
+        * learned on this device.
+        *
+        * - If 0, use the same table for every device. This puts routes into
+        *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+        *   (but note that these three are currently all equal to
+        *   RT6_TABLE_MAIN).
+        * - If > 0, use the specified table.
+        * - If < 0, put routes into table dev->ifindex + (-rt_table).
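+        *   For example, accept_ra_rt_table = -1000 puts routes learned on
+        *   the interface with ifindex 3 into table 1003.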
+        */
+       struct inet6_dev *idev = in6_dev_get(dev);
+       u32 table;
+       int sysctl;
+
+       if (!idev)
+               return default_table;
+
+       sysctl = idev->cnf.accept_ra_rt_table;
+       if (sysctl == 0) {
+               table = default_table;
+       } else if (sysctl > 0) {
+               table = (u32) sysctl;
+       } else {
+               table = (unsigned) dev->ifindex + (-sysctl);
+       }
+       in6_dev_put(idev);
+       return table;
+}
+
 /*
  *     Add prefix route.
  */
@@ -1955,7 +1982,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                      unsigned long expires, u32 flags)
 {
        struct fib6_config cfg = {
-               .fc_table = RT6_TABLE_PREFIX,
+               .fc_table = addrconf_rt_table(dev, RT6_TABLE_PREFIX),
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@ -1989,7 +2016,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
 
-       table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+       table = fib6_get_table(dev_net(dev),
+                              addrconf_rt_table(dev, RT6_TABLE_PREFIX));
        if (table == NULL)
                return NULL;
 
@@ -4195,6 +4223,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
+       array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
        array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
        array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -4903,6 +4932,13 @@ static struct addrconf_sysctl_table
                },
 #endif
 #endif
+               {
+                       .procname       = "accept_ra_rt_table",
+                       .data           = &ipv6_devconf.accept_ra_rt_table,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        .procname       = "proxy_ndp",
                        .data           = &ipv6_devconf.proxy_ndp,
index ab5c7ad482cded367c1180a3a3761615bc2a4d54..a4cfde67fcb7dd664bb9cde4f41fab2f406b30d6 100644 (file)
@@ -49,6 +49,7 @@
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <net/tcp.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/inet_common.h>
 #include <net/route.h>
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
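+/* Android paranoid networking: creating INET6 sockets requires membership
+ * in the AID_INET group or CAP_NET_RAW.
+ */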
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+       return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+       return 1;
+}
+#endif
+
 MODULE_AUTHOR("Cast of dozens");
 MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
 MODULE_LICENSE("GPL");
@@ -108,6 +123,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
+       if (!current_has_network())
+               return -EACCES;
+
        if (sock->type != SOCK_RAW &&
            sock->type != SOCK_DGRAM &&
            !inet_ehash_secret)
@@ -159,8 +177,7 @@ lookup_protocol:
        }
 
        err = -EPERM;
-       if (sock->type == SOCK_RAW && !kern &&
-           !ns_capable(net->user_ns, CAP_NET_RAW))
+       if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
                goto out_rcu_unlock;
 
        sock->ops = answer->ops;
@@ -477,6 +494,21 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 EXPORT_SYMBOL(inet6_getname);
 
+int inet6_killaddr_ioctl(struct net *net, void __user *arg)
+{
+       struct in6_ifreq ireq;
+       struct sockaddr_in6 sin6;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EACCES;
+
+       if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
+               return -EFAULT;
+
+       sin6.sin6_family = AF_INET6;
+       sin6.sin6_addr = ireq.ifr6_addr;
+       return tcp_nuke_addr(net, (struct sockaddr *) &sin6);
+}
+
 int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
        struct sock *sk = sock->sk;
@@ -500,6 +532,8 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                return addrconf_del_ifaddr(net, (void __user *) arg);
        case SIOCSIFDSTADDR:
                return addrconf_set_dstaddr(net, (void __user *) arg);
+       case SIOCKILLADDR:
+               return inet6_killaddr_ioctl(net, (void __user *) arg);
        default:
                if (!sk->sk_prot->ioctl)
                        return -ENOIOCTLCMD;
@@ -840,6 +874,9 @@ static int __init inet6_init(void)
        if (err)
                goto out_unregister_udplite_proto;
 
+       err = proto_register(&pingv6_prot, 1);
+       if (err)
+               goto out_unregister_raw_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
@@ -930,6 +967,10 @@ static int __init inet6_init(void)
        if (err)
                goto ipv6_packet_fail;
 
+       err = pingv6_init();
+       if (err)
+               goto pingv6_fail;
+
 #ifdef CONFIG_SYSCTL
        err = ipv6_sysctl_register();
        if (err)
@@ -942,6 +983,8 @@ out:
 sysctl_fail:
        ipv6_packet_cleanup();
 #endif
+pingv6_fail:
+       pingv6_exit();
 ipv6_packet_fail:
        tcpv6_exit();
 tcpv6_fail:
@@ -985,6 +1028,8 @@ register_pernet_fail:
        rtnl_unregister_all(PF_INET6);
 out_sock_register_fail:
        rawv6_exit();
+out_unregister_ping_proto:
+       proto_unregister(&pingv6_prot);
 out_unregister_raw_proto:
        proto_unregister(&rawv6_prot);
 out_unregister_udplite_proto:
index 8997340e37429960cc499eb3067ce71ed99a09d9..1aef8b22ba73494a7edc0f7edba0a2541d057420 100644 (file)
@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 /*
  *     Handle MSG_ERRQUEUE
  */
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sock_exterr_skb *serr;
@@ -369,7 +369,6 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
                                               &sin->sin6_addr);
                        sin->sin6_scope_id = 0;
                }
-               *addr_len = sizeof(*sin);
        }
 
        memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
index 51af9d0d019a5ae22563918bed4e617a2db122ce..11de7379fb9a8ea04eec99c9486c7536f9ff9045 100644 (file)
@@ -166,15 +166,15 @@ EXPORT_SYMBOL_GPL(ipv6_find_tlv);
  * to explore inner IPv6 header, eg. ICMPv6 error messages.
  *
  * If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
  *
  * If the first fragment doesn't contain the final protocol header or
  * NEXTHDR_NONE it is considered invalid.
  *
  * Note that non-1st fragment is special case that "the protocol number
  * of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
  *
  * if flags is not NULL and it's a fragment, then the frag flag
  * IP6_FH_F_FRAG will be set. If it's an AH header, the
@@ -253,9 +253,12 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                                if (target < 0 &&
                                    ((!ipv6_ext_hdr(hp->nexthdr)) ||
                                     hp->nexthdr == NEXTHDR_NONE)) {
-                                       if (fragoff)
+                                       if (fragoff) {
                                                *fragoff = _frag_off;
-                                       return hp->nexthdr;
+                                               return hp->nexthdr;
+                                       } else {
+                                               return -EINVAL;
+                                       }
                                }
                                return -ENOENT;
                        }
index 2dee1d9d73052d544feafad2e62ff14e8f586e39..84bdcd06dd34bcb89bc2d13a1750c7d0f19dec40 100644 (file)
@@ -57,6 +57,7 @@
 
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
+#include <net/ping.h>
 #include <net/protocol.h>
 #include <net/raw.h>
 #include <net/rawv6.h>
@@ -84,12 +85,18 @@ static inline struct sock *icmpv6_sk(struct net *net)
 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                       u8 type, u8 code, int offset, __be32 info)
 {
+       /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
+       struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
        struct net *net = dev_net(skb->dev);
 
        if (type == ICMPV6_PKT_TOOBIG)
                ip6_update_pmtu(skb, net, info, 0, 0);
        else if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
+
+       if (!(type & ICMPV6_INFOMSG_MASK) &&
+           icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
+               ping_err(skb, offset, info);
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
@@ -224,7 +231,8 @@ static bool opt_unrec(struct sk_buff *skb, __u32 offset)
        return (*op & 0xC0) == 0x80;
 }
 
-static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len)
+int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+                              struct icmp6hdr *thdr, int len)
 {
        struct sk_buff *skb;
        struct icmp6hdr *icmp6h;
@@ -307,8 +315,8 @@ static void mip6_addr_swap(struct sk_buff *skb)
 static inline void mip6_addr_swap(struct sk_buff *skb) {}
 #endif
 
-static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
-                                            struct sock *sk, struct flowi6 *fl6)
+struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
+                                     struct sock *sk, struct flowi6 *fl6)
 {
        struct dst_entry *dst, *dst2;
        struct flowi6 fl2;
@@ -389,6 +397,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        int len;
        int hlimit;
        int err = 0;
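+       /* Reflect the triggering packet's mark in the error reply when the
+        * fwmark_reflect sysctl is enabled.
+        */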
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        if ((u8 *)hdr < skb->head ||
            (skb->network_header + sizeof(*hdr)) > skb->tail)
@@ -454,6 +463,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        fl6.daddr = hdr->saddr;
        if (saddr)
                fl6.saddr = *saddr;
+       fl6.flowi6_mark = mark;
        fl6.flowi6_oif = iif;
        fl6.fl6_icmp_type = type;
        fl6.fl6_icmp_code = code;
@@ -462,6 +472,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -543,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        struct dst_entry *dst;
        int err = 0;
        int hlimit;
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        saddr = &ipv6_hdr(skb)->daddr;
 
@@ -559,11 +571,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
                fl6.saddr = *saddr;
        fl6.flowi6_oif = skb->dev->ifindex;
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+       fl6.flowi6_mark = mark;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -697,7 +711,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
                skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
                                             IPPROTO_ICMPV6, 0));
                if (__skb_checksum_complete(skb)) {
-                       LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
+                       LIMIT_NETDEBUG(KERN_DEBUG
+                                      "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
                                       saddr, daddr);
                        goto csum_error;
                }
@@ -718,7 +733,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
                break;
 
        case ICMPV6_ECHO_REPLY:
-               /* we couldn't care less */
+               ping_rcv(skb);
                break;
 
        case ICMPV6_PKT_TOOBIG:
index e4311cbc8b4ecbf70ea1fb2e2f2415342be382cc..f1493138d21e237de7fdeb150b3053962690f71a 100644 (file)
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        final_p = fl6_update_dst(fl6, np->opt, &final);
        fl6->saddr = treq->loc_addr;
        fl6->flowi6_oif = treq->iif;
-       fl6->flowi6_mark = sk->sk_mark;
+       fl6->flowi6_mark = inet_rsk(req)->ir_mark;
        fl6->fl6_dport = inet_rsk(req)->rmt_port;
        fl6->fl6_sport = inet_rsk(req)->loc_port;
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
index 4433ab40e7de05a2b518e20dacfa370d736bded3..7f45af3e812807d7b8bb2c77f82e47d579a65559 100644 (file)
@@ -153,6 +153,18 @@ config IP6_NF_TARGET_REJECT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP6_NF_TARGET_REJECT_SKERR
+       bool "Force socket error when rejecting with icmp*"
+       depends on IP6_NF_TARGET_REJECT
+       default n
+       help
+         This option additionally turns a "--reject-with icmp*" into a
+         matching error on the local socket. The REJECT target normally
+         only sends an ICMP message and leaves the local socket unaware
+         of any ingress rejects.
+
+         If unsure, say N.
+
 config IP6_NF_MANGLE
        tristate "Packet mangling"
        default m if NETFILTER_ADVANCED=n
index 70f9abc0efe9a0ba0bc8fdbb6060f3bfe593291f..573c232239b0a21c8c636d289373d16c349eba5f 100644 (file)
@@ -180,6 +180,15 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
                skb_in->dev = net->loopback_dev;
 
        icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+#ifdef CONFIG_IP6_NF_TARGET_REJECT_SKERR
+       if (skb_in->sk) {
+               icmpv6_err_convert(ICMPV6_DEST_UNREACH, code,
+                                  &skb_in->sk->sk_err);
+               skb_in->sk->sk_error_report(skb_in->sk);
+               pr_debug("ip6t_REJECT: sk_err=%d for skb=%p sk=%p\n",
+                       skb_in->sk->sk_err, skb_in, skb_in->sk);
+       }
+#endif
 }
 
 static unsigned int
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
new file mode 100644 (file)
index 0000000..8d1c206
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             "Ping" sockets
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * Based on ipv4/ping.c code.
+ *
+ * Authors:    Lorenzo Colitti (IPv6 support)
+ *             Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6),
+ *             Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32)
+ *
+ */
+
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/transp_v6.h>
+#include <net/ping.h>
+
+struct proto pingv6_prot = {
+       .name =         "PINGv6",
+       .owner =        THIS_MODULE,
+       .init =         ping_init_sock,
+       .close =        ping_close,
+       .connect =      ip6_datagram_connect,
+       .disconnect =   udp_disconnect,
+       .setsockopt =   ipv6_setsockopt,
+       .getsockopt =   ipv6_getsockopt,
+       .sendmsg =      ping_v6_sendmsg,
+       .recvmsg =      ping_recvmsg,
+       .bind =         ping_bind,
+       .backlog_rcv =  ping_queue_rcv_skb,
+       .hash =         ping_hash,
+       .unhash =       ping_unhash,
+       .get_port =     ping_get_port,
+       .obj_size =     sizeof(struct raw6_sock),
+};
+EXPORT_SYMBOL_GPL(pingv6_prot);
+
+static struct inet_protosw pingv6_protosw = {
+       .type =      SOCK_DGRAM,
+       .protocol =  IPPROTO_ICMPV6,
+       .prot =      &pingv6_prot,
+       .ops =       &inet6_dgram_ops,
+       .no_check =  UDP_CSUM_DEFAULT,
+       .flags =     INET_PROTOSW_REUSE,
+};
+
+
+/* Compatibility glue so we can support IPv6 when it's compiled as a module */
+int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+{
+       return -EAFNOSUPPORT;
+}
+int dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                                struct sk_buff *skb)
+{
+       return -EAFNOSUPPORT;
+}
+int dummy_icmpv6_err_convert(u8 type, u8 code, int *err)
+{
+       return -EAFNOSUPPORT;
+}
+void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+                           __be16 port, u32 info, u8 *payload) {}
+int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+                       const struct net_device *dev, int strict)
+{
+       return 0;
+}
+
+int __init pingv6_init(void)
+{
+       pingv6_ops.ipv6_recv_error = ipv6_recv_error;
+       pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
+       pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
+       pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
+       pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
+       return inet6_register_protosw(&pingv6_protosw);
+}
+
+/* This never gets called because it's not possible to unload the ipv6 module,
+ * but just in case.
+ */
+void pingv6_exit(void)
+{
+       pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
+       pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
+       pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
+       pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
+       pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
+       inet6_unregister_protosw(&pingv6_protosw);
+}
+
+int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                   size_t len)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct icmp6hdr user_icmph;
+       int addr_type;
+       struct in6_addr *daddr;
+       int iif = 0;
+       struct flowi6 fl6;
+       int err;
+       int hlimit;
+       struct dst_entry *dst;
+       struct rt6_info *rt;
+       struct pingfakehdr pfh;
+
+       pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
+
+       err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph,
+                                 sizeof(user_icmph));
+       if (err)
+               return err;
+
+       if (msg->msg_name) {
+               struct sockaddr_in6 *u = (struct sockaddr_in6 *) msg->msg_name;
+               if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+                   u->sin6_family != AF_INET6) {
+                       return -EINVAL;
+               }
+               if (sk->sk_bound_dev_if &&
+                   sk->sk_bound_dev_if != u->sin6_scope_id) {
+                       return -EINVAL;
+               }
+               daddr = &(u->sin6_addr);
+               iif = u->sin6_scope_id;
+       } else {
+               if (sk->sk_state != TCP_ESTABLISHED)
+                       return -EDESTADDRREQ;
+               daddr = &np->daddr;
+       }
+
+       if (!iif)
+               iif = sk->sk_bound_dev_if;
+
+       addr_type = ipv6_addr_type(daddr);
+       if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
+               return -EINVAL;
+       if (addr_type & IPV6_ADDR_MAPPED)
+               return -EINVAL;
+
+       /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
+
+       memset(&fl6, 0, sizeof(fl6));
+
+       fl6.flowi6_proto = IPPROTO_ICMPV6;
+       fl6.saddr = np->saddr;
+       fl6.daddr = *daddr;
+       fl6.flowi6_mark = sk->sk_mark;
+       fl6.fl6_icmp_type = user_icmph.icmp6_type;
+       fl6.fl6_icmp_code = user_icmph.icmp6_code;
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+       if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+               fl6.flowi6_oif = np->mcast_oif;
+       else if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->ucast_oif;
+
+       dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, 1);
+       if (IS_ERR(dst))
+               return PTR_ERR(dst);
+       rt = (struct rt6_info *) dst;
+
+       pfh.icmph.type = user_icmph.icmp6_type;
+       pfh.icmph.code = user_icmph.icmp6_code;
+       pfh.icmph.checksum = 0;
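+       /* As on IPv4 ping sockets, the local port doubles as the echo id. */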
+       pfh.icmph.un.echo.id = inet->inet_sport;
+       pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence;
+       pfh.iov = msg->msg_iov;
+       pfh.wcheck = 0;
+       pfh.family = AF_INET6;
+
+       if (ipv6_addr_is_multicast(&fl6.daddr))
+               hlimit = np->mcast_hops;
+       else
+               hlimit = np->hop_limit;
+       if (hlimit < 0)
+               hlimit = ip6_dst_hoplimit(dst);
+
+       lock_sock(sk);
+       err = ip6_append_data(sk, ping_getfrag, &pfh, len,
+                             0, hlimit,
+                             np->tclass, NULL, &fl6, rt,
+                             MSG_DONTWAIT, np->dontfrag);
+
+       if (err) {
+               ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+                                  ICMP6_MIB_OUTERRORS);
+               ip6_flush_pending_frames(sk);
+       } else {
+               err = icmpv6_push_pending_frames(sk, &fl6,
+                                                (struct icmp6hdr *) &pfh.icmph,
+                                                len);
+       }
+       release_sock(sk);
+
+       if (err)
+               return err;
+
+       return len;
+}
index 464b1c9c08e49f258fd1a48bcd9c49aff7b05cd5..dff1f4b2c668f94fcb48582ce77975e9b214091b 100644 (file)
@@ -459,8 +459,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        if (flags & MSG_OOB)
                return -EOPNOTSUPP;
 
-       if (flags & MSG_ERRQUEUE)
-               return ipv6_recv_error(sk, msg, len, addr_len);
+       if (flags & MSG_ERRQUEUE) {
+               *addr_len = sizeof(struct sockaddr_in6);
+               return ipv6_recv_error(sk, msg, len);
+       }
 
        if (np->rxpmtu && np->rxopt.bits.rxpmtu)
                return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
index b2614b22622baa266ad801a065d9e88698fd9df6..18e27efa1bc31db0ea47eef6d109c971ede82a69 100644 (file)
@@ -93,13 +93,12 @@ static void         rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex,
-                                          unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+                                          const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex);
+                                          const struct in6_addr *gwaddr);
 #endif
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -685,7 +684,6 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
 {
-       struct net *net = dev_net(dev);
        struct route_info *rinfo = (struct route_info *) opt;
        struct in6_addr prefix_buf, *prefix;
        unsigned int pref;
@@ -730,8 +728,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
        if (rinfo->prefix_len == 0)
                rt = rt6_get_dflt_router(gwaddr, dev);
        else
-               rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-                                       gwaddr, dev->ifindex);
+               rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
 
        if (rt && !lifetime) {
                ip6_del_rt(rt);
@@ -739,8 +736,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
        }
 
        if (!rt && lifetime)
-               rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
-                                       pref);
+               rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
        else if (rt)
                rt->rt6i_flags = RTF_ROUTEINFO |
                                 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1161,7 +1157,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark;
+       fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
@@ -1852,15 +1848,16 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex)
+                                          const struct in6_addr *gwaddr)
 {
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
 
-       table = fib6_get_table(net, RT6_TABLE_INFO);
+       table = fib6_get_table(dev_net(dev),
+                              addrconf_rt_table(dev, RT6_TABLE_INFO));
        if (!table)
                return NULL;
 
@@ -1870,7 +1867,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                goto out;
 
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->dst.dev->ifindex != ifindex)
+               if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
                if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
                        continue;
@@ -1884,21 +1881,20 @@ out:
        return rt;
 }
 
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
                                           const struct in6_addr *prefix, int prefixlen,
-                                          const struct in6_addr *gwaddr, int ifindex,
-                                          unsigned int pref)
+                                          const struct in6_addr *gwaddr, unsigned int pref)
 {
        struct fib6_config cfg = {
-               .fc_table       = RT6_TABLE_INFO,
+               .fc_table       = addrconf_rt_table(dev, RT6_TABLE_INFO),
                .fc_metric      = IP6_RT_PRIO_USER,
-               .fc_ifindex     = ifindex,
+               .fc_ifindex     = dev->ifindex,
                .fc_dst_len     = prefixlen,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
                                  RTF_UP | RTF_PREF(pref),
                .fc_nlinfo.portid = 0,
                .fc_nlinfo.nlh = NULL,
-               .fc_nlinfo.nl_net = net,
+               .fc_nlinfo.nl_net = dev_net(dev),
        };
 
        cfg.fc_dst = *prefix;
@@ -1910,7 +1906,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 
        ip6_route_add(&cfg);
 
-       return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+       return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
 }
 #endif
 
@@ -1919,7 +1915,8 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        struct rt6_info *rt;
        struct fib6_table *table;
 
-       table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+       table = fib6_get_table(dev_net(dev),
+                              addrconf_rt_table(dev, RT6_TABLE_MAIN));
        if (!table)
                return NULL;
 
@@ -1941,7 +1938,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                                     unsigned int pref)
 {
        struct fib6_config cfg = {
-               .fc_table       = RT6_TABLE_DFLT,
+               .fc_table       = addrconf_rt_table(dev, RT6_TABLE_DFLT),
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -1958,28 +1955,17 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
        return rt6_get_dflt_router(gwaddr, dev);
 }
 
-void rt6_purge_dflt_routers(struct net *net)
-{
-       struct rt6_info *rt;
-       struct fib6_table *table;
 
-       /* NOTE: Keep consistent with rt6_get_dflt_router */
-       table = fib6_get_table(net, RT6_TABLE_DFLT);
-       if (!table)
-               return;
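+/* fib6_clean_all callback: returning -1 tells fib6_clean_all to delete the
+ * route. Same RTF_DEFAULT/RTF_ADDRCONF test as before, but walking every
+ * table means per-device route tables are purged as well.
+ */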
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg)
+{
+       if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+           (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+               return -1;
+       return 0;
+}
 
-restart:
-       read_lock_bh(&table->tb6_lock);
-       for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-               if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-                   (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
-                       dst_hold(&rt->dst);
-                       read_unlock_bh(&table->tb6_lock);
-                       ip6_del_rt(rt);
-                       goto restart;
-               }
-       }
-       read_unlock_bh(&table->tb6_lock);
+void rt6_purge_dflt_routers(struct net *net)
+{
+       fib6_clean_all(net, rt6_addrconf_purge, 0, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
index d5dda20bd717404a07d4dd45567b5bba23997250..1efbc6f44a6a73d727ffdb0c1a2e1d71051bcaf5 100644 (file)
@@ -212,6 +212,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);
 
+       ireq->ir_mark = inet_request_mark(sk, skb);
+
        req->expires = 0UL;
        req->num_retrans = 0;
        ireq->ecn_ok            = ecn_ok;
@@ -238,7 +240,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                final_p = fl6_update_dst(&fl6, np->opt, &final);
                fl6.saddr = ireq6->loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
-               fl6.flowi6_mark = sk->sk_mark;
+               fl6.flowi6_mark = ireq->ir_mark;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
index e85c48bd404f4036b0c4e7db1e29bd6a92192f75..53a9f5a64536fd7b55199b17b323a76d80841049 100644 (file)
@@ -24,6 +24,13 @@ static ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv6.sysctl.fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
 
index 66c718854e5aa28543fe7352c6546e718b25e7fc..747374c9887639a931c3969c0feebfa85cbdd330 100644 (file)
@@ -791,6 +791,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
        fl6.flowi6_proto = IPPROTO_TCP;
        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                fl6.flowi6_oif = inet6_iif(skb);
+       fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -999,6 +1000,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                TCP_ECN_create_request(req, skb, sock_net(sk));
 
        treq->iif = sk->sk_bound_dev_if;
+       inet_rsk(req)->ir_mark = inet_request_mark(sk, skb);
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
index 6b298dc614e3d2d926d4962d93d3d5d52fdeb251..c46539a1df565c3abf4dd87926f21b58e371e567 100644 (file)
@@ -373,8 +373,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        int is_udp4;
        bool slow;
 
-       if (flags & MSG_ERRQUEUE)
-               return ipv6_recv_error(sk, msg, len, addr_len);
+       if (flags & MSG_ERRQUEUE) {
+               *addr_len = sizeof(struct sockaddr_in6);
+               return ipv6_recv_error(sk, msg, len);
+       }
 
        if (np->rxpmtu && np->rxopt.bits.rxpmtu)
                return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
index 56d22cae590683c9a3b530ad0152b105d5c19c2d..bdf9c60b296a053b1058a418fb7c7d60e9885af1 100644 (file)
@@ -1089,6 +1089,8 @@ config NETFILTER_XT_MATCH_OWNER
        based on who created the socket: the user or group. It is also
        possible to check whether a socket actually exists.
 
+       Conflicts with the '"quota, tag, owner" match'.
+
 config NETFILTER_XT_MATCH_POLICY
        tristate 'IPsec "policy" match support'
        depends on XFRM
@@ -1122,6 +1124,22 @@ config NETFILTER_XT_MATCH_PKTTYPE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+       bool '"quota, tag, owner" match and stats support'
+       depends on NETFILTER_XT_MATCH_SOCKET
+       depends on NETFILTER_XT_MATCH_OWNER=n
+       help
+         This option replaces the `owner' match. In addition to matching
+         on uid, it keeps stats based on a tag assigned to a socket.
+         The full tag is comprised of a UID and an accounting tag.
+         The tags are assignable to sockets from user space (e.g. a download
+         manager can assign the socket to another UID for accounting).
+         Stats and control are done via /proc/net/xt_qtaguid/.
+         It takes the same arguments as the `owner' match it replaces,
+         though ideally the iptables tool would recognize it directly.
+
+         If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
        tristate '"quota" match support'
        depends on NETFILTER_ADVANCED
@@ -1132,6 +1150,30 @@ config NETFILTER_XT_MATCH_QUOTA
          If you want to compile it as a module, say M here and read
          <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+       tristate '"quota2" match support'
+       depends on NETFILTER_ADVANCED
+       help
+         This option adds a `quota2' match, which matches against a byte
+         counter that is kept accurately rather than per CPU.
+         It allows naming the quotas.
+         This is based on http://xtables-addons.git.sourceforge.net
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+       bool '"quota2" Netfilter LOG support'
+       depends on NETFILTER_XT_MATCH_QUOTA2
+       depends on IP_NF_TARGET_ULOG=n    # not yes, not module, just no
+       default n
+       help
+         This option allows `quota2' to log ONCE when a quota limit
+         is passed. It logs via NETLINK using the NETLINK_NFLOG family.
+         It logs much like ipt_ULOG would, but without the packet payload.
+
+         If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
        tristate '"rateest" match support'
        depends on NETFILTER_ADVANCED
index a1abf87d43bfbd902f82cc9f8aae156d936cd89a..d9655f6f3466cec6584de319dc3a5ade586a2dc3 100644 (file)
@@ -127,7 +127,9 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_OWNER) += xt_owner.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
index f407ebc13481ae5caa0f634db643d034c7af7e6a..f6562ba97a978a8b9deeb433e922d5d6fb28ea8f 100644 (file)
@@ -5,6 +5,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
 
 struct idletimer_tg_attr {
        struct attribute attr;
@@ -55,14 +63,97 @@ struct idletimer_tg {
        struct kobject *kobj;
        struct idletimer_tg_attr attr;
 
+       struct timespec delayed_timer_trigger;
+       struct timespec last_modified_timer;
+       struct timespec last_suspend_time;
+       struct notifier_block pm_nb;
+
+       int timeout;
        unsigned int refcnt;
+       bool work_pending;
+       bool send_nl_msg;
+       bool active;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
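+/* Work out which state to report: if the timer's expiry was delayed (e.g.
+ * across a suspend), re-arm the deferred notification and rewrite *ts to
+ * the time the timer effectively triggered.
+ */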
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+               struct timespec *ts)
+{
+       bool state;
+       struct timespec temp;
+       spin_lock_bh(&timestamp_lock);
+       timer->work_pending = false;
+       if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+                       timer->delayed_timer_trigger.tv_sec != 0) {
+               state = false;
+               temp.tv_sec = timer->timeout;
+               temp.tv_nsec = 0;
+               if (timer->delayed_timer_trigger.tv_sec != 0) {
+                       temp = timespec_add(timer->delayed_timer_trigger, temp);
+                       ts->tv_sec = temp.tv_sec;
+                       ts->tv_nsec = temp.tv_nsec;
+                       timer->delayed_timer_trigger.tv_sec = 0;
+                       timer->work_pending = true;
+                       schedule_work(&timer->work);
+               } else {
+                       temp = timespec_add(timer->last_modified_timer, temp);
+                       ts->tv_sec = temp.tv_sec;
+                       ts->tv_nsec = temp.tv_nsec;
+               }
+       } else {
+               state = timer->active;
+       }
+       spin_unlock_bh(&timestamp_lock);
+       return state;
+}
+
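+/*
+ * Emit a KOBJ_CHANGE uevent on the idletimer kobject. The payload
+ * carries three env strings (values illustrative):
+ *   INTERFACE=<label>  STATE=active|inactive  TIME_NS=<boottime ns>
+ */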
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+       char iface_msg[NLMSG_MAX_SIZE];
+       char state_msg[NLMSG_MAX_SIZE];
+       char timestamp_msg[NLMSG_MAX_SIZE];
+       char *envp[] = { iface_msg, state_msg, timestamp_msg, NULL };
+       int res;
+       struct timespec ts;
+       uint64_t time_ns;
+       bool state;
+
+       res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+                      iface);
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+
+       get_monotonic_boottime(&ts);
+       state = check_for_delayed_trigger(timer, &ts);
+       res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+                       state ? "active" : "inactive");
+
+       if (NLMSG_MAX_SIZE <= res) {
+               pr_err("message too long (%d)", res);
+               return;
+       }
+
+       time_ns = timespec_to_ns(&ts);
+       res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+       if (NLMSG_MAX_SIZE <= res) {
+               timestamp_msg[0] = '\0';
+               pr_err("message too long (%d)", res);
+       }
+
+       pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+       kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -83,6 +174,7 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 {
        struct idletimer_tg *timer;
        unsigned long expires = 0;
+       unsigned long now = jiffies;
 
        mutex_lock(&list_mutex);
 
@@ -92,11 +184,15 @@ static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
 
        mutex_unlock(&list_mutex);
 
-       if (time_after(expires, jiffies))
+       if (time_after(expires, now))
                return sprintf(buf, "%u\n",
-                              jiffies_to_msecs(expires - jiffies) / 1000);
+                              jiffies_to_msecs(expires - now) / 1000);
 
-       return sprintf(buf, "0\n");
+       if (timer->send_nl_msg)
+               return sprintf(buf, "0 %d\n",
+                       jiffies_to_msecs(now - expires) / 1000);
+       else
+               return sprintf(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +201,9 @@ static void idletimer_tg_work(struct work_struct *work)
                                                  work);
 
        sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+       if (timer->send_nl_msg)
+               notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(unsigned long data)
@@ -112,8 +211,55 @@ static void idletimer_tg_expired(unsigned long data)
        struct idletimer_tg *timer = (struct idletimer_tg *) data;
 
        pr_debug("timer %s expired\n", timer->attr.attr.name);
-
+       spin_lock_bh(&timestamp_lock);
+       timer->active = false;
+       timer->work_pending = true;
        schedule_work(&timer->work);
+       spin_unlock_bh(&timestamp_lock);
+}
+
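+/*
+ * PM notifier: snapshot the boottime clock on suspend; on resume,
+ * shorten the pending timer by the time spent suspended, or expire it
+ * immediately when that interval already covers the remaining timeout.
+ */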
+static int idletimer_resume(struct notifier_block *notifier,
+               unsigned long pm_event, void *unused)
+{
+       struct timespec ts;
+       unsigned long time_diff, now = jiffies;
+       struct idletimer_tg *timer = container_of(notifier,
+                       struct idletimer_tg, pm_nb);
+       if (!timer)
+               return NOTIFY_DONE;
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               get_monotonic_boottime(&timer->last_suspend_time);
+               break;
+       case PM_POST_SUSPEND:
+               spin_lock_bh(&timestamp_lock);
+               if (!timer->active) {
+                       spin_unlock_bh(&timestamp_lock);
+                       break;
+               }
+               /* Jiffies are not updated while suspended, so "now" still
+                * reflects the moment the system suspended. */
+               if (time_after(timer->timer.expires, now)) {
+                       get_monotonic_boottime(&ts);
+                       ts = timespec_sub(ts, timer->last_suspend_time);
+                       time_diff = timespec_to_jiffies(&ts);
+                       if (timer->timer.expires > (time_diff + now)) {
+                               mod_timer_pending(&timer->timer,
+                                               (timer->timer.expires - time_diff));
+                       } else {
+                               del_timer(&timer->timer);
+                               timer->timer.expires = 0;
+                               timer->active = false;
+                               timer->work_pending = true;
+                               schedule_work(&timer->work);
+                       }
+               }
+               spin_unlock_bh(&timestamp_lock);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
 }
 
 static int idletimer_tg_create(struct idletimer_tg_info *info)
@@ -145,6 +291,20 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
        setup_timer(&info->timer->timer, idletimer_tg_expired,
                    (unsigned long) info->timer);
        info->timer->refcnt = 1;
+       info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+       info->timer->active = true;
+       info->timer->timeout = info->timeout;
+
+       info->timer->delayed_timer_trigger.tv_sec = 0;
+       info->timer->delayed_timer_trigger.tv_nsec = 0;
+       info->timer->work_pending = false;
+       get_monotonic_boottime(&info->timer->last_modified_timer);
+
+       info->timer->pm_nb.notifier_call = idletimer_resume;
+       ret = register_pm_notifier(&info->timer->pm_nb);
+       if (ret)
+               printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+                               __func__, ret);
 
        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -161,6 +321,34 @@ out:
        return ret;
 }
 
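+/*
+ * Re-arm the idle timer. If it had already fired, also report the
+ * inactive->active transition: either record it for a still-pending
+ * work item via delayed_timer_trigger, or schedule a fresh one.
+ */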
+static void reset_timer(const struct idletimer_tg_info *info)
+{
+       unsigned long now = jiffies;
+       struct idletimer_tg *timer = info->timer;
+       bool timer_prev;
+
+       spin_lock_bh(&timestamp_lock);
+       timer_prev = timer->active;
+       timer->active = true;
+       /* timer_prev guards against jiffies wrap-around in time_before() */
+       if (!timer_prev || time_before(timer->timer.expires, now)) {
+               pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+                               timer->timer.expires, now);
+               /* Check whether an inactive notification is still pending */
+               if (timer->work_pending)
+                       timer->delayed_timer_trigger = timer->last_modified_timer;
+               else {
+                       timer->work_pending = true;
+                       schedule_work(&timer->work);
+               }
+       }
+
+       get_monotonic_boottime(&timer->last_modified_timer);
+       mod_timer(&timer->timer,
+                       msecs_to_jiffies(info->timeout * 1000) + now);
+       spin_unlock_bh(&timestamp_lock);
+}
+
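+/*
+ * A sketch of a rule that arms this target (option names as in the
+ * Android iptables extension; verify --send_nl_msg support in your
+ * build):
+ *   iptables -t mangle -A PREROUTING -i wlan0 \
+ *     -j IDLETIMER --timeout 600 --label wlan0_idle --send_nl_msg 1
+ */
+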
 /*
  * The actual xt_tables plugin.
  */
@@ -168,15 +356,23 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
                                         const struct xt_action_param *par)
 {
        const struct idletimer_tg_info *info = par->targinfo;
+       unsigned long now = jiffies;
 
        pr_debug("resetting timer %s, timeout period %u\n",
                 info->label, info->timeout);
 
        BUG_ON(!info->timer);
 
-       mod_timer(&info->timer->timer,
-                 msecs_to_jiffies(info->timeout * 1000) + jiffies);
+       info->timer->active = true;
 
+       if (time_before(info->timer->timer.expires, now)) {
+               schedule_work(&info->timer->work);
+               pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+                        info->label, info->timer->timer.expires, now);
+       }
+
+       /* TODO: Avoid modifying timers on each packet */
+       reset_timer(info);
        return XT_CONTINUE;
 }
 
@@ -185,7 +381,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        struct idletimer_tg_info *info = par->targinfo;
        int ret;
 
-       pr_debug("checkentry targinfo%s\n", info->label);
+       pr_debug("checkentry targinfo %s\n", info->label);
 
        if (info->timeout == 0) {
                pr_debug("timeout value is zero\n");
@@ -204,9 +400,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        info->timer = __idletimer_tg_find_by_label(info->label);
        if (info->timer) {
                info->timer->refcnt++;
-               mod_timer(&info->timer->timer,
-                         msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+               reset_timer(info);
                pr_debug("increased refcnt of timer %s to %u\n",
                         info->label, info->timer->refcnt);
        } else {
@@ -219,6 +413,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
        }
 
        mutex_unlock(&list_mutex);
+
        return 0;
 }
 
@@ -236,11 +431,12 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+               unregister_pm_notifier(&info->timer->pm_nb);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
        } else {
                pr_debug("decreased refcnt of timer %s to %u\n",
-                        info->label, info->timer->refcnt);
+               info->label, info->timer->refcnt);
        }
 
        mutex_unlock(&list_mutex);
@@ -248,6 +444,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 
 static struct xt_target idletimer_tg __read_mostly = {
        .name           = "IDLETIMER",
+       .revision       = 1,
        .family         = NFPROTO_UNSPEC,
        .target         = idletimer_tg_target,
        .targetsize     = sizeof(struct idletimer_tg_info),
@@ -313,3 +510,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644 (file)
index 0000000..f2c01df
--- /dev/null
@@ -0,0 +1,3012 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+#include "../../fs/proc/internal.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+       ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+
+/* Everybody can write. But proc_ctrl_write_limited is true by default which
+ * limits what can be controlled. See the can_*() functions.
+ */
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+/* Limited by default, so the gid of the ctrl and stats proc entries
+ * will limit what can be done. See the can_*() functions.
+ */
+static bool proc_stats_readall_limited = true;
+static bool proc_ctrl_write_limited = true;
+
+module_param_named(stats_readall_limited, proc_stats_readall_limited, bool,
+                  S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_limited, proc_ctrl_write_limited, bool,
+                  S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ *  - the iface stats handling will not act on notifications.
+ *  - iptables matches will never match.
+ *  - ctrl commands silently succeed.
+ *  - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ *  - don't create proc-specific structs to track tags
+ *  - don't check whether active tag stats exceed their limits.
+ *  - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+                  S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
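+/*
+ * A hypothetical invocation to raise verbosity (the mask bits are
+ * defined in xt_qtaguid_internal.h):
+ *   insmod xt_qtaguid.ko debug_mask=0xff
+ * or at runtime:
+ *   echo 0xff > /sys/module/xt_qtaguid/parameters/debug_mask
+ */
+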
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* entries will go away once userspace gets used to
+ * the new fields that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
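+/*
+ * Permission helpers: a caller may act on other uids when it is in the
+ * ctrl/stats proc file's group, is root (fsuid 0), owns the proc file,
+ * or when the corresponding *_limited knob is off.
+ */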
+static bool can_manipulate_uids(void)
+{
+       /* root pwnd */
+       return in_egroup_p(xt_qtaguid_ctrl_file->gid)
+               || unlikely(!current_fsuid()) || unlikely(!proc_ctrl_write_limited)
+               || unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid);
+}
+
+static bool can_impersonate_uid(uid_t uid)
+{
+       return uid == current_fsuid() || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(uid_t uid)
+{
+       /* root pwnd */
+       return in_egroup_p(xt_qtaguid_stats_file->gid)
+               || unlikely(!current_fsuid()) || uid == current_fsuid()
+               || unlikely(!proc_stats_readall_limited)
+               || unlikely(current_fsuid() == xt_qtaguid_ctrl_file->uid);
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+                                 enum ifs_tx_rx direction,
+                                 enum ifs_proto ifs_proto,
+                                 int bytes,
+                                 int packets)
+{
+       counters->bpc[set][direction][ifs_proto].bytes += bytes;
+       counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct tag_node *data = rb_entry(node, struct tag_node, node);
+               int result;
+               RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+                        " node=%p data=%p\n", tag, node, data);
+               result = tag_compare(tag, data->tag);
+               RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+                        " data.tag=0x%llx (uid=%u) res=%d\n",
+                        tag, data->tag, get_uid_from_tag(data->tag), result);
+               if (result < 0)
+                       node = node->rb_left;
+               else if (result > 0)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct tag_node *this = rb_entry(*new, struct tag_node,
+                                                node);
+               int result = tag_compare(data->tag, this->tag);
+               RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+                        " (uid=%u)\n", __func__,
+                        this->tag,
+                        get_uid_from_tag(this->tag));
+               parent = *new;
+               if (result < 0)
+                       new = &((*new)->rb_left);
+               else if (result > 0)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+                                       struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+                                                          tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_counter_set, tn.node);
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+       tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+       struct tag_node *node = tag_node_tree_search(root, tag);
+       if (!node)
+               return NULL;
+       return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
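+/*
+ * sock_tag entries are keyed by the raw struct sock pointer, so the
+ * lookup and insertion below order nodes by sk address, not by socket
+ * contents.
+ */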
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+                                            const struct sock *sk)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct sock_tag *data = rb_entry(node, struct sock_tag,
+                                                sock_node);
+               if (sk < data->sk)
+                       node = node->rb_left;
+               else if (sk > data->sk)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct sock_tag *this = rb_entry(*new, struct sock_tag,
+                                                sock_node);
+               parent = *new;
+               if (data->sk < this->sk)
+                       new = &((*new)->rb_left);
+               else if (data->sk > this->sk)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->sock_node, parent, new);
+       rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+       struct rb_node *node;
+       struct sock_tag *st_entry;
+
+       node = rb_first(st_to_free_tree);
+       while (node) {
+               st_entry = rb_entry(node, struct sock_tag, sock_node);
+               node = rb_next(node);
+               CT_DEBUG("qtaguid: %s(): "
+                        "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+                        st_entry->sk,
+                        st_entry->tag,
+                        get_uid_from_tag(st_entry->tag));
+               rb_erase(&st_entry->sock_node, st_to_free_tree);
+               sockfd_put(st_entry->socket);
+               kfree(st_entry);
+       }
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+                                                      const pid_t pid)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct proc_qtu_data *data = rb_entry(node,
+                                                     struct proc_qtu_data,
+                                                     node);
+               if (pid < data->pid)
+                       node = node->rb_left;
+               else if (pid > data->pid)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+                                     struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct proc_qtu_data *this = rb_entry(*new,
+                                                     struct proc_qtu_data,
+                                                     node);
+               parent = *new;
+               if (data->pid < this->pid)
+                       new = &((*new)->rb_left);
+               else if (data->pid > this->pid)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+                                    struct rb_root *root)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+       /* Figure out where to put new node */
+       while (*new) {
+               struct uid_tag_data *this = rb_entry(*new,
+                                                    struct uid_tag_data,
+                                                    node);
+               parent = *new;
+               if (data->uid < this->uid)
+                       new = &((*new)->rb_left);
+               else if (data->uid > this->uid)
+                       new = &((*new)->rb_right);
+               else
+                       BUG();
+       }
+
+       /* Add new node and rebalance tree. */
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+                                                    uid_t uid)
+{
+       struct rb_node *node = root->rb_node;
+
+       while (node) {
+               struct uid_tag_data *data = rb_entry(node,
+                                                    struct uid_tag_data,
+                                                    node);
+               if (uid < data->uid)
+                       node = node->rb_left;
+               else if (uid > data->uid)
+                       node = node->rb_right;
+               else
+                       return data;
+       }
+       return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failure; no lock is taken here (the caller is
+ * expected to hold uid_tag_data_tree_lock).
+ * If found_res is not NULL:
+ *   sets *found_res to true if an existing entry was found,
+ *   false if a new one was allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+       struct uid_tag_data *utd_entry;
+
+       /* Look for top level uid_tag_data for the UID */
+       utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+       DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+       if (found_res)
+               *found_res = utd_entry;
+       if (utd_entry)
+               return utd_entry;
+
+       utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+       if (!utd_entry) {
+               pr_err("qtaguid: get_uid_data(%u): "
+                      "tag data alloc failed\n", uid);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       utd_entry->uid = uid;
+       utd_entry->tag_ref_tree = RB_ROOT;
+       uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+       DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+       return utd_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+                                  struct uid_tag_data *utd_entry)
+{
+       struct tag_ref *tr_entry;
+       int res;
+
+       if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+               pr_info("qtaguid: new_tag_ref(0x%llx): "
+                       "tag ref alloc quota exceeded. max=%d\n",
+                       new_tag, max_sock_tags);
+               res = -EMFILE;
+               goto err_res;
+
+       }
+
+       tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+       if (!tr_entry) {
+               pr_err("qtaguid: new_tag_ref(0x%llx): "
+                      "tag ref alloc failed\n",
+                      new_tag);
+               res = -ENOMEM;
+               goto err_res;
+       }
+       tr_entry->tn.tag = new_tag;
+       /* tr_entry->num_sock_tags  handled by caller */
+       utd_entry->num_active_tags++;
+       tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+       DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+                " inserted new tag ref %p\n",
+                new_tag, tr_entry);
+       return tr_entry;
+
+err_res:
+       return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+                                     struct uid_tag_data **utd_res)
+{
+       struct uid_tag_data *utd_entry;
+       struct tag_ref *tr_entry;
+       bool found_utd;
+       uid_t uid = get_uid_from_tag(full_tag);
+
+       DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+                full_tag, uid);
+
+       utd_entry = get_uid_data(uid, &found_utd);
+       if (IS_ERR_OR_NULL(utd_entry)) {
+               if (utd_res)
+                       *utd_res = utd_entry;
+               return NULL;
+       }
+
+       tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+       if (utd_res)
+               *utd_res = utd_entry;
+       DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+                full_tag, utd_entry, tr_entry);
+       return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+                                  struct uid_tag_data **utd_res)
+{
+       struct uid_tag_data *utd_entry;
+       struct tag_ref *tr_entry;
+
+       DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+                full_tag);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+       BUG_ON(IS_ERR_OR_NULL(utd_entry));
+       if (!tr_entry)
+               tr_entry = new_tag_ref(full_tag, utd_entry);
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       if (utd_res)
+               *utd_res = utd_entry;
+       DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+                full_tag, utd_entry, tr_entry);
+       return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+       /* Are we done with the UID tag data entry? */
+       if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+               !utd_entry->num_pqd) {
+               DR_DEBUG("qtaguid: %s(): "
+                        "erase utd_entry=%p uid=%u "
+                        "by pid=%u tgid=%u uid=%u\n", __func__,
+                        utd_entry, utd_entry->uid,
+                        current->pid, current->tgid, current_fsuid());
+               BUG_ON(utd_entry->num_active_tags);
+               rb_erase(&utd_entry->node, &uid_tag_data_tree);
+               kfree(utd_entry);
+       } else {
+               DR_DEBUG("qtaguid: %s(): "
+                        "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+                        __func__, utd_entry, utd_entry->num_active_tags,
+                        utd_entry->num_pqd);
+               BUG_ON(!(utd_entry->num_active_tags ||
+                        utd_entry->num_pqd));
+       }
+}
+
+/*
+ * If no sock_tags are using this tag_ref,
+ * decrements refcount of utd_entry, removes tr_entry
+ * from utd_entry->tag_ref_tree and frees.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+                                       struct uid_tag_data *utd_entry)
+{
+       DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+                tr_entry, tr_entry->tn.tag,
+                get_uid_from_tag(tr_entry->tn.tag));
+       if (!tr_entry->num_sock_tags) {
+               BUG_ON(!utd_entry->num_active_tags);
+               utd_entry->num_active_tags--;
+               rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+               DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+               kfree(tr_entry);
+       }
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+       struct rb_node *node;
+       struct tag_ref *tr_entry;
+       tag_t acct_tag;
+
+       DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+                full_tag, get_uid_from_tag(full_tag));
+       acct_tag = get_atag_from_tag(full_tag);
+       node = rb_first(&utd_entry->tag_ref_tree);
+       while (node) {
+               tr_entry = rb_entry(node, struct tag_ref, tn.node);
+               node = rb_next(node);
+               if (!acct_tag || tr_entry->tn.tag == full_tag)
+                       free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+       }
+}
+
+static ssize_t read_proc_u64(struct file *file, char __user *buf,
+                        size_t size, loff_t *ppos)
+{
+       uint64_t *valuep = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", *valuep);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t read_proc_bool(struct file *file, char __user *buf,
+                         size_t size, loff_t *ppos)
+{
+       bool *valuep = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%u\n", *valuep);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
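+/*
+ * Counter sets let userspace split a uid's stats into buckets (Android
+ * uses them for foreground vs. background accounting); the active set
+ * is expected to be switched via the ctrl interface and defaults to 0.
+ */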
+static int get_active_counter_set(tag_t tag)
+{
+       int active_set = 0;
+       struct tag_counter_set *tcs;
+
+       MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+                " (uid=%u)\n",
+                tag, get_uid_from_tag(tag));
+       /* For now we only handle UID tags for active sets */
+       tag = get_utag_from_tag(tag);
+       spin_lock_bh(&tag_counter_set_list_lock);
+       tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (tcs)
+               active_set = tcs->active_set;
+       spin_unlock_bh(&tag_counter_set_list_lock);
+       return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+       struct iface_stat *iface_entry;
+
+       /* A NULL device name cannot match any tracked interface */
+       if (ifname == NULL) {
+               pr_info("qtaguid: iface_stat: get() NULL device name\n");
+               return NULL;
+       }
+
+       /* Iterate over interfaces */
+       list_for_each_entry(iface_entry, &iface_stat_list, list) {
+               if (!strcmp(ifname, iface_entry->ifname))
+                       goto done;
+       }
+       iface_entry = NULL;
+done:
+       return iface_entry;
+}
+
+/* This is for fmt2 only */
+static void pp_iface_stat_header(struct seq_file *m)
+{
+       seq_puts(m,
+                "ifname "
+                "total_skb_rx_bytes total_skb_rx_packets "
+                "total_skb_tx_bytes total_skb_tx_packets "
+                "rx_tcp_bytes rx_tcp_packets "
+                "rx_udp_bytes rx_udp_packets "
+                "rx_other_bytes rx_other_packets "
+                "tx_tcp_bytes tx_tcp_packets "
+                "tx_udp_bytes tx_udp_packets "
+                "tx_other_bytes tx_other_packets\n"
+       );
+}
+
+static void pp_iface_stat_line(struct seq_file *m,
+                              struct iface_stat *iface_entry)
+{
+       struct data_counters *cnts;
+       int cnt_set = 0;   /* We only use one set for the device */
+       cnts = &iface_entry->totals_via_skb;
+       seq_printf(m, "%s %llu %llu %llu %llu %llu %llu %llu %llu "
+                  "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+                  iface_entry->ifname,
+                  dc_sum_bytes(cnts, cnt_set, IFS_RX),
+                  dc_sum_packets(cnts, cnt_set, IFS_RX),
+                  dc_sum_bytes(cnts, cnt_set, IFS_TX),
+                  dc_sum_packets(cnts, cnt_set, IFS_TX),
+                  cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+                  cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+                  cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+}
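+
+/*
+ * Example fmt2 line (illustrative values; columns match the header
+ * printed by pp_iface_stat_header()):
+ *   wlan0 10240 12 2048 6 8192 10 1024 1 1024 1 1024 3 512 2 512 1
+ */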
+
+struct proc_iface_stat_fmt_info {
+       long fmt;
+};
+
+static void *iface_stat_fmt_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_iface_stat_fmt_info *p = m->private;
+       loff_t n = *pos;
+
+       /*
+        * This lock will prevent iface_stat_update() from changing active,
+        * and in turn prevent an interface from unregistering itself.
+        */
+       spin_lock_bh(&iface_stat_list_lock);
+
+       if (unlikely(module_passive))
+               return NULL;
+
+       if (!n && p->fmt == 2)
+               pp_iface_stat_header(m);
+
+       return seq_list_start(&iface_stat_list, n);
+}
+
+static void *iface_stat_fmt_proc_next(struct seq_file *m, void *p, loff_t *pos)
+{
+       return seq_list_next(p, &iface_stat_list, pos);
+}
+
+static void iface_stat_fmt_proc_stop(struct seq_file *m, void *p)
+{
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_stat_fmt_proc_show(struct seq_file *m, void *v)
+{
+       struct proc_iface_stat_fmt_info *p = m->private;
+       struct iface_stat *iface_entry;
+       struct rtnl_link_stats64 dev_stats, *stats;
+       struct rtnl_link_stats64 no_dev_stats = {0};
+
+       CT_DEBUG("qtaguid:proc iface_stat_fmt pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, current_fsuid());
+
+       iface_entry = list_entry(v, struct iface_stat, list);
+
+       if (iface_entry->active) {
+               stats = dev_get_stats(iface_entry->net_dev,
+                                     &dev_stats);
+       } else {
+               stats = &no_dev_stats;
+       }
+       /*
+        * If the meaning of the data changes, then update the fmtX
+        * string.
+        */
+       if (p->fmt == 1) {
+               seq_printf(m, "%s %d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+                          iface_entry->ifname,
+                          iface_entry->active,
+                          iface_entry->totals_via_dev[IFS_RX].bytes,
+                          iface_entry->totals_via_dev[IFS_RX].packets,
+                          iface_entry->totals_via_dev[IFS_TX].bytes,
+                          iface_entry->totals_via_dev[IFS_TX].packets,
+                          stats->rx_bytes, stats->rx_packets,
+                          stats->tx_bytes, stats->tx_packets
+                          );
+       } else {
+               pp_iface_stat_line(m, iface_entry);
+       }
+       return 0;
+}
+
+static const struct file_operations read_u64_fops = {
+       .read           = read_proc_u64,
+       .llseek         = default_llseek,
+};
+
+static const struct file_operations read_bool_fops = {
+       .read           = read_proc_bool,
+       .llseek         = default_llseek,
+};
+
+static void iface_create_proc_worker(struct work_struct *work)
+{
+       struct proc_dir_entry *proc_entry;
+       struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+                                                  iface_work);
+       struct iface_stat *new_iface  = isw->iface_entry;
+
+       /* iface_entries are not deleted, so safe to manipulate. */
+       proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+       if (IS_ERR_OR_NULL(proc_entry)) {
+               pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+               kfree(isw);
+               return;
+       }
+
+       new_iface->proc_ptr = proc_entry;
+
+       proc_create_data("tx_bytes", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_TX].bytes);
+       proc_create_data("rx_bytes", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_RX].bytes);
+       proc_create_data("tx_packets", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_TX].packets);
+       proc_create_data("rx_packets", proc_iface_perms, proc_entry,
+                        &read_u64_fops,
+                        &new_iface->totals_via_dev[IFS_RX].packets);
+       proc_create_data("active", proc_iface_perms, proc_entry,
+                        &read_bool_fops, &new_iface->active);
+
+       IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+                "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+       kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+                                  struct net_device *net_dev,
+                                  bool activate)
+{
+       if (activate) {
+               entry->net_dev = net_dev;
+               entry->active = true;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "enable tracking. rfcnt=%d\n", __func__,
+                        entry->ifname,
+                        __this_cpu_read(*net_dev->pcpu_refcnt));
+       } else {
+               entry->active = false;
+               entry->net_dev = NULL;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "disable tracking. rfcnt=%d\n", __func__,
+                        entry->ifname,
+                        __this_cpu_read(*net_dev->pcpu_refcnt));
+       }
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+       struct iface_stat *new_iface;
+       struct iface_stat_work *isw;
+
+       new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+       if (new_iface == NULL) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "iface_stat alloc failed\n", net_dev->name);
+               return NULL;
+       }
+       new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+       if (new_iface->ifname == NULL) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "ifname alloc failed\n", net_dev->name);
+               kfree(new_iface);
+               return NULL;
+       }
+       spin_lock_init(&new_iface->tag_stat_list_lock);
+       new_iface->tag_stat_tree = RB_ROOT;
+       _iface_stat_set_active(new_iface, net_dev, true);
+
+       /*
+        * ipv6 notifier chains run in atomic context, so the proc
+        * entries cannot be created here; defer that to a workqueue.
+        */
+       isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+       if (!isw) {
+               pr_err("qtaguid: iface_stat: create(%s): "
+                      "work alloc failed\n", new_iface->ifname);
+               _iface_stat_set_active(new_iface, net_dev, false);
+               kfree(new_iface->ifname);
+               kfree(new_iface);
+               return NULL;
+       }
+       isw->iface_entry = new_iface;
+       INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+       schedule_work(&isw->iface_work);
+       list_add(&new_iface->list, &iface_stat_list);
+       return new_iface;
+}
+
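+/*
+ * Some drivers reset their raw dev stats (e.g. across an ifdown/ifup
+ * cycle). If the counters went backwards, fold the last snapshot into
+ * the running totals so totals_via_dev stays monotonic.
+ */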
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+                                              struct iface_stat *iface)
+{
+       struct rtnl_link_stats64 dev_stats, *stats;
+       bool stats_rewound;
+
+       stats = dev_get_stats(net_dev, &dev_stats);
+       /* Detect counters that went backwards (driver reset its stats) */
+       stats_rewound =
+               (stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+               || (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+       IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+                "bytes rx/tx=%llu/%llu "
+                "active=%d last_known=%d "
+                "stats_rewound=%d\n", __func__,
+                net_dev ? net_dev->name : "?",
+                iface, net_dev,
+                stats->rx_bytes, stats->tx_bytes,
+                iface->active, iface->last_known_valid, stats_rewound);
+
+       if (iface->active && iface->last_known_valid && stats_rewound) {
+               pr_warn_once("qtaguid: iface_stat: %s(%s): "
+                            "iface reset its stats unexpectedly\n", __func__,
+                            net_dev->name);
+
+               iface->totals_via_dev[IFS_TX].bytes +=
+                       iface->last_known[IFS_TX].bytes;
+               iface->totals_via_dev[IFS_TX].packets +=
+                       iface->last_known[IFS_TX].packets;
+               iface->totals_via_dev[IFS_RX].bytes +=
+                       iface->last_known[IFS_RX].bytes;
+               iface->totals_via_dev[IFS_RX].packets +=
+                       iface->last_known[IFS_RX].packets;
+               iface->last_known_valid = false;
+               IF_DEBUG("qtaguid: %s(%s): iface=%p "
+                        "used last known bytes rx/tx=%llu/%llu\n", __func__,
+                        iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+                        iface->last_known[IFS_TX].bytes);
+       }
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+                             struct in_ifaddr *ifa)
+{
+       struct in_device *in_dev = NULL;
+       const char *ifname;
+       struct iface_stat *entry;
+       __be32 ipaddr = 0;
+       struct iface_stat *new_iface;
+
+       IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+                net_dev ? net_dev->name : "?",
+                ifa, net_dev);
+       if (!net_dev) {
+               pr_err("qtaguid: iface_stat: create(): no net dev\n");
+               return;
+       }
+
+       ifname = net_dev->name;
+       if (!ifa) {
+               in_dev = in_dev_get(net_dev);
+               if (!in_dev) {
+                       pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+                              ifname);
+                       return;
+               }
+               IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+                        ifname, in_dev);
+               for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+                       IF_DEBUG("qtaguid: iface_stat: create(%s): "
+                                "ifa=%p ifa_label=%s\n",
+                                ifname, ifa,
+                                ifa->ifa_label ? ifa->ifa_label : "(null)");
+                       if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+                               break;
+               }
+       }
+
+       if (!ifa) {
+               IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+                        ifname);
+               goto done_put;
+       }
+       ipaddr = ifa->ifa_local;
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(ifname);
+       if (entry != NULL) {
+               IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+                        ifname, entry);
+               iface_check_stats_reset_and_adjust(net_dev, entry);
+               _iface_stat_set_active(entry, net_dev, true);
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "tracking now %d on ip=%pI4\n", __func__,
+                        entry->ifname, true, &ipaddr);
+               goto done_unlock_put;
+       }
+
+       new_iface = iface_alloc(net_dev);
+       IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+                "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+       spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+       if (in_dev)
+               in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+                                  struct inet6_ifaddr *ifa)
+{
+       struct in_device *in_dev;
+       const char *ifname;
+       struct iface_stat *entry;
+       struct iface_stat *new_iface;
+       int addr_type;
+
+       IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+                ifa, net_dev, net_dev ? net_dev->name : "");
+       if (!net_dev) {
+               pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+               return;
+       }
+       ifname = net_dev->name;
+
+       in_dev = in_dev_get(net_dev);
+       if (!in_dev) {
+               pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+                      ifname);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+                ifname, in_dev);
+
+       if (!ifa) {
+               IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+                        ifname);
+               goto done_put;
+       }
+       addr_type = ipv6_addr_type(&ifa->addr);
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(ifname);
+       if (entry != NULL) {
+               IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                        ifname, entry);
+               iface_check_stats_reset_and_adjust(net_dev, entry);
+               _iface_stat_set_active(entry, net_dev, true);
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "tracking now %d on ip=%pI6c\n", __func__,
+                        entry->ifname, true, &ifa->addr);
+               goto done_unlock_put;
+       }
+
+       new_iface = iface_alloc(net_dev);
+       IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+                "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+       spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+       in_dev_put(in_dev);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+       MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+       return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+       struct sock_tag *sock_tag_entry;
+       MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+       if (!sk)
+               return NULL;
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(sk);
+       spin_unlock_bh(&sock_tag_list_lock);
+       return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+                    struct xt_action_param *par)
+{
+       int thoff = 0, tproto;
+
+       switch (par->family) {
+       case NFPROTO_IPV6:
+               tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
+               if (tproto < 0)
+                       MT_DEBUG("%s(): transport header not found in ipv6"
+                                " skb=%p\n", __func__, skb);
+               break;
+       case NFPROTO_IPV4:
+               tproto = ip_hdr(skb)->protocol;
+               break;
+       default:
+               tproto = IPPROTO_RAW;
+       }
+       return tproto;
+}
+
+static void
+data_counters_update(struct data_counters *dc, int set,
+                    enum ifs_tx_rx direction, int proto, int bytes)
+{
+       switch (proto) {
+       case IPPROTO_TCP:
+               dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+               break;
+       case IPPROTO_UDP:
+               dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+               break;
+       case IPPROTO_IP:
+       default:
+               dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+                                   1);
+               break;
+       }
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered. With stash_only, only
+ * snapshot the current counters into last_known[]; otherwise fold them
+ * into totals_via_dev and mark the interface inactive.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+       struct rtnl_link_stats64 dev_stats, *stats;
+       struct iface_stat *entry;
+
+       stats = dev_get_stats(net_dev, &dev_stats);
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(net_dev->name);
+       if (entry == NULL) {
+               IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+                        net_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                net_dev->name, entry);
+       if (!entry->active) {
+               IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+                        net_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       if (stash_only) {
+               entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+               entry->last_known[IFS_TX].packets = stats->tx_packets;
+               entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+               entry->last_known[IFS_RX].packets = stats->rx_packets;
+               entry->last_known_valid = true;
+               IF_DEBUG("qtaguid: %s(%s): "
+                        "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+                        net_dev->name, stats->rx_bytes, stats->tx_bytes);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+       entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+       entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+       entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+       entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+       /* We don't need the last_known[] anymore */
+       entry->last_known_valid = false;
+       _iface_stat_set_active(entry, net_dev, false);
+       IF_DEBUG("qtaguid: %s(%s): "
+                "disable tracking. rx/tx=%llu/%llu\n", __func__,
+                net_dev->name, stats->rx_bytes, stats->tx_bytes);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb. Do nothing if
+ * the entry does not exist (when a device was never configured with an
+ * IP address). Called for each matched skb.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+                                      struct xt_action_param *par)
+{
+       struct iface_stat *entry;
+       const struct net_device *el_dev;
+       enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+       int bytes = skb->len;
+       int proto;
+
+       if (!skb->dev) {
+               MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+               el_dev = par->in ? : par->out;
+       } else {
+               const struct net_device *other_dev;
+               el_dev = skb->dev;
+               other_dev = par->in ? : par->out;
+               if (el_dev != other_dev) {
+                       MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+                                "par->(in/out)=%p %s\n",
+                                par->hooknum, el_dev, el_dev->name, other_dev,
+                                other_dev->name);
+               }
+       }
+
+       if (unlikely(!el_dev)) {
+               pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
+                                  par->hooknum, __func__);
+               BUG();
+       } else if (unlikely(!el_dev->name)) {
+               pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
+                                  par->hooknum, __func__);
+               BUG();
+       } else {
+               proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
+       }
+
+       spin_lock_bh(&iface_stat_list_lock);
+       entry = get_iface_entry(el_dev->name);
+       if (entry == NULL) {
+               IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+                        __func__, el_dev->name);
+               spin_unlock_bh(&iface_stat_list_lock);
+               return;
+       }
+
+       IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+                el_dev->name, entry);
+
+       data_counters_update(&entry->totals_via_skb, 0, direction, proto,
+                            bytes);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+                       enum ifs_tx_rx direction, int proto, int bytes)
+{
+       int active_set;
+       active_set = get_active_counter_set(tag_entry->tn.tag);
+       MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+                "dir=%d proto=%d bytes=%d)\n",
+                tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+                active_set, direction, proto, bytes);
+       data_counters_update(&tag_entry->counters, active_set, direction,
+                            proto, bytes);
+       if (tag_entry->parent_counters)
+               data_counters_update(tag_entry->parent_counters, active_set,
+                                    direction, proto, bytes);
+}
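+
+/*
+ * Note on tag_stat_update() above: the bytes/packets land in the counter
+ * set currently active for the tag's uid (see get_active_counter_set()),
+ * and are mirrored into parent_counters when set, so the {0, uid_tag}
+ * aggregate always includes every {acct_tag, uid_tag} child.
+ */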
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+                                          tag_t tag)
+{
+       struct tag_stat *new_tag_stat_entry = NULL;
+       IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+                " (uid=%u)\n", __func__,
+                iface_entry, tag, get_uid_from_tag(tag));
+       new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+       if (!new_tag_stat_entry) {
+               pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+               goto done;
+       }
+       new_tag_stat_entry->tn.tag = tag;
+       tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+       return new_tag_stat_entry;
+}
+
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+                              const struct sock *sk, enum ifs_tx_rx direction,
+                              int proto, int bytes)
+{
+       struct tag_stat *tag_stat_entry;
+       tag_t tag, acct_tag;
+       tag_t uid_tag;
+       struct data_counters *uid_tag_counters;
+       struct sock_tag *sock_tag_entry;
+       struct iface_stat *iface_entry;
+       struct tag_stat *new_tag_stat = NULL;
+       MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+               "uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+                ifname, uid, sk, direction, proto, bytes);
+
+       iface_entry = get_iface_entry(ifname);
+       if (!iface_entry) {
+               pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+                                  "%s not found\n", ifname);
+               return;
+       }
+       /* It is ok to process data when an iface_entry is inactive */
+
+       MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+                ifname, iface_entry);
+
+       /*
+        * Look for a tagged sock.
+        * Its tag carries both an acct_tag and a uid_tag.
+        */
+       sock_tag_entry = get_sock_stat(sk);
+       if (sock_tag_entry) {
+               tag = sock_tag_entry->tag;
+               acct_tag = get_atag_from_tag(tag);
+               uid_tag = get_utag_from_tag(tag);
+       } else {
+               acct_tag = make_atag_from_value(0);
+               tag = combine_atag_with_uid(acct_tag, uid);
+               uid_tag = make_tag_from_uid(uid);
+       }
+       MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+                " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+                tag, get_uid_from_tag(tag), iface_entry);
+       /* Loop over tag list under this interface for {acct_tag,uid_tag} */
+       spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+       tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+                                             tag);
+       if (tag_stat_entry) {
+               /*
+                * Updating the {acct_tag, uid_tag} entry handles both stats:
+                * {0, uid_tag} will also get updated.
+                */
+               tag_stat_update(tag_stat_entry, direction, proto, bytes);
+               spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+               return;
+       }
+
+       /* Loop over tag list under this interface for {0,uid_tag} */
+       tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+                                             uid_tag);
+       if (!tag_stat_entry) {
+               /* Here: the base uid_tag did not exist */
+               /*
+                * No parent counters. So
+                *  - No {0, uid_tag} stats and no {acct_tag, uid_tag} stats.
+                */
+               new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+               if (!new_tag_stat)
+                       goto unlock;
+               uid_tag_counters = &new_tag_stat->counters;
+       } else {
+               uid_tag_counters = &tag_stat_entry->counters;
+       }
+
+       if (acct_tag) {
+               /* Create the child {acct_tag, uid_tag} and hook up parent. */
+               new_tag_stat = create_if_tag_stat(iface_entry, tag);
+               if (!new_tag_stat)
+                       goto unlock;
+               new_tag_stat->parent_counters = uid_tag_counters;
+       } else {
+               /*
+                * For new_tag_stat to still be NULL here would require:
+                *  {0, uid_tag} exists
+                *  and {acct_tag, uid_tag} doesn't exist
+                *  AND acct_tag == 0.
+                * Impossible. This reassures us that new_tag_stat
+                * below will always be assigned.
+                */
+               BUG_ON(!new_tag_stat);
+       }
+       tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
+       spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+}
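+
+/*
+ * Note on if_tag_stat_update() above, lookup/create order under
+ * tag_stat_list_lock:
+ *   1. exact {acct_tag, uid_tag} hit: update it (parent updated implicitly);
+ *   2. else ensure the {0, uid_tag} parent exists (created on demand);
+ *   3. for a non-zero acct_tag, create the child and link parent_counters.
+ * Allocations use GFP_ATOMIC since a BH spinlock is held throughout.
+ */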
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+                                     unsigned long event, void *ptr)
+{
+       struct net_device *dev = ptr;
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+                "ev=0x%lx/%s netdev=%p->name=%s\n",
+                event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+       switch (event) {
+       case NETDEV_UP:
+               iface_stat_create(dev, NULL);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+                                        unsigned long event, void *ptr)
+{
+       struct inet6_ifaddr *ifa = ptr;
+       struct net_device *dev;
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+                "ev=0x%lx/%s ifa=%p\n",
+                event, netdev_evt_str(event), ifa);
+
+       switch (event) {
+       case NETDEV_UP:
+               BUG_ON(!ifa || !ifa->idev);
+               dev = (struct net_device *)ifa->idev->dev;
+               iface_stat_create_ipv6(dev, ifa);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               BUG_ON(!ifa || !ifa->idev);
+               dev = (struct net_device *)ifa->idev->dev;
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+                                       unsigned long event, void *ptr)
+{
+       struct in_ifaddr *ifa = ptr;
+       struct net_device *dev;
+
+       if (unlikely(module_passive))
+               return NOTIFY_DONE;
+
+       IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+                "ev=0x%lx/%s ifa=%p\n",
+                event, netdev_evt_str(event), ifa);
+
+       switch (event) {
+       case NETDEV_UP:
+               BUG_ON(!ifa || !ifa->ifa_dev);
+               dev = ifa->ifa_dev->dev;
+               iface_stat_create(dev, ifa);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
+               BUG_ON(!ifa || !ifa->ifa_dev);
+               dev = ifa->ifa_dev->dev;
+               iface_stat_update(dev, event == NETDEV_DOWN);
+               atomic64_inc(&qtu_events.iface_events);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+       .notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+       .notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+       .notifier_call = iface_inet6addr_event_handler,
+};
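+
+/*
+ * The three notifier blocks above drive the iface_stat lifecycle: plain
+ * netdev events create/retire entries, while the IPv4/IPv6 address
+ * notifiers catch the NETDEV_UP that carries the address information
+ * (ifa) an interface entry is created with.
+ */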
+
+static const struct seq_operations iface_stat_fmt_proc_seq_ops = {
+       .start  = iface_stat_fmt_proc_start,
+       .next   = iface_stat_fmt_proc_next,
+       .stop   = iface_stat_fmt_proc_stop,
+       .show   = iface_stat_fmt_proc_show,
+};
+
+static int proc_iface_stat_fmt_open(struct inode *inode, struct file *file)
+{
+       struct proc_iface_stat_fmt_info *s;
+
+       s = __seq_open_private(file, &iface_stat_fmt_proc_seq_ops,
+                       sizeof(struct proc_iface_stat_fmt_info));
+       if (!s)
+               return -ENOMEM;
+
+       s->fmt = (uintptr_t)PDE_DATA(inode);
+       return 0;
+}
+
+static const struct file_operations proc_iface_stat_fmt_fops = {
+       .open           = proc_iface_stat_fmt_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+       int err;
+
+       iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+       if (!iface_stat_procdir) {
+               pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+               err = -1;
+               goto err;
+       }
+
+       iface_stat_all_procfile = proc_create_data(iface_stat_all_procfilename,
+                                                  proc_iface_perms,
+                                                  parent_procdir,
+                                                  &proc_iface_stat_fmt_fops,
+                                                  (void *)1 /* fmt1 */);
+       if (!iface_stat_all_procfile) {
+               pr_err("qtaguid: iface_stat: init "
+                      " failed to create stat_old proc entry\n");
+               err = -1;
+               goto err_zap_entry;
+       }
+
+       iface_stat_fmt_procfile = proc_create_data(iface_stat_fmt_procfilename,
+                                                  proc_iface_perms,
+                                                  parent_procdir,
+                                                  &proc_iface_stat_fmt_fops,
+                                                  (void *)2 /* fmt2 */);
+       if (!iface_stat_fmt_procfile) {
+               pr_err("qtaguid: iface_stat: init "
+                      " failed to create stat_all proc entry\n");
+               err = -1;
+               goto err_zap_all_stats_entry;
+       }
+
+       err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register dev event handler\n");
+               goto err_zap_all_stats_entries;
+       }
+       err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register ipv4 dev event handler\n");
+               goto err_unreg_nd;
+       }
+
+       err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+       if (err) {
+               pr_err("qtaguid: iface_stat: init "
+                      "failed to register ipv6 dev event handler\n");
+               goto err_unreg_ip4_addr;
+       }
+       return 0;
+
+err_unreg_ip4_addr:
+       unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+       unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+       remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+       remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+       remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+       return err;
+}
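+
+/*
+ * Note on iface_stat_init() above: parent_procdir receives the iface_stat
+ * subdirectory plus two flat files sharing proc_iface_stat_fmt_fops,
+ * distinguished only by the PDE data (1 or 2) that
+ * proc_iface_stat_fmt_open() stores in s->fmt to select the output
+ * layout. The error path unwinds in strict reverse order of setup.
+ */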
+
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+                                   struct xt_action_param *par)
+{
+       struct sock *sk;
+       unsigned int hook_mask = (1 << par->hooknum);
+
+       MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+                par->hooknum, par->family);
+
+       /*
+        * Let's not abuse the xt_socket_get*_sk(), or else it will
+        * return garbage SKs.
+        */
+       if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+               return NULL;
+
+       switch (par->family) {
+       case NFPROTO_IPV6:
+               sk = xt_socket_get6_sk(skb, par);
+               break;
+       case NFPROTO_IPV4:
+               sk = xt_socket_get4_sk(skb, par);
+               break;
+       default:
+               return NULL;
+       }
+
+       if (sk) {
+               MT_DEBUG("qtaguid: %p->sk_proto=%u "
+                        "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+               /*
+                * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+                * "struct inet_timewait_sock" which is missing fields.
+                */
+               if (sk->sk_state == TCP_TIME_WAIT) {
+                       xt_socket_put_sk(sk);
+                       sk = NULL;
+               }
+       }
+       return sk;
+}
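+
+/*
+ * Note on qtaguid_find_sk() above: a non-NULL return carries a reference
+ * taken by xt_socket_get*_sk(); the caller must drop it with
+ * xt_socket_put_sk() (qtaguid_mt() tracks this via got_sock).
+ */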
+
+static void account_for_uid(const struct sk_buff *skb,
+                           const struct sock *alternate_sk, uid_t uid,
+                           struct xt_action_param *par)
+{
+       const struct net_device *el_dev;
+
+       if (!skb->dev) {
+               MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+               el_dev = par->in ? : par->out;
+       } else {
+               const struct net_device *other_dev;
+               el_dev = skb->dev;
+               other_dev = par->in ? : par->out;
+               if (el_dev != other_dev) {
+                       MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+                               "par->(in/out)=%p %s\n",
+                               par->hooknum, el_dev, el_dev->name, other_dev,
+                               other_dev->name);
+               }
+       }
+
+       if (unlikely(!el_dev)) {
+               pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+       } else if (unlikely(!el_dev->name)) {
+               pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+       } else {
+               int proto = ipx_proto(skb, par);
+               MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+                        par->hooknum, el_dev->name, el_dev->type,
+                        par->family, proto);
+
+               if_tag_stat_update(el_dev->name, uid,
+                               skb->sk ? skb->sk : alternate_sk,
+                               par->in ? IFS_RX : IFS_TX,
+                               proto, skb->len);
+       }
+}
+
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_qtaguid_match_info *info = par->matchinfo;
+       const struct file *filp;
+       bool got_sock = false;
+       struct sock *sk;
+       uid_t sock_uid;
+       bool res;
+
+       if (unlikely(module_passive))
+               return (info->match ^ info->invert) == 0;
+
+       MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+                par->hooknum, skb, par->in, par->out, par->family);
+
+       atomic64_inc(&qtu_events.match_calls);
+       if (skb == NULL) {
+               res = (info->match ^ info->invert) == 0;
+               goto ret_res;
+       }
+
+       switch (par->hooknum) {
+       case NF_INET_PRE_ROUTING:
+       case NF_INET_POST_ROUTING:
+               atomic64_inc(&qtu_events.match_calls_prepost);
+               iface_stat_update_from_skb(skb, par);
+               /*
+                * We are done in pre/post. The skb will get processed
+                * further later.
+                */
+               res = (info->match ^ info->invert);
+               goto ret_res;
+               break;
+       /* default: Fall through and do UID related work */
+       }
+
+       sk = skb->sk;
+       /*
+        * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+        * "struct inet_timewait_sock" which is missing fields.
+        * So we ignore it.
+        */
+       if (sk && sk->sk_state == TCP_TIME_WAIT)
+               sk = NULL;
+       if (sk == NULL) {
+               /*
+                * A missing sk->sk_socket happens when packets are in-flight
+                * and the matching socket is already closed and gone.
+                */
+               sk = qtaguid_find_sk(skb, par);
+               /*
+                * If we got the socket from the find_sk(), we will need to put
+                * it back, as nf_tproxy_get_sock_v4() got it.
+                */
+               got_sock = sk;
+               if (sk)
+                       atomic64_inc(&qtu_events.match_found_sk_in_ct);
+               else
+                       atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+       } else {
+               atomic64_inc(&qtu_events.match_found_sk);
+       }
+       MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+                par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
+       if (sk != NULL) {
+               MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+                       par->hooknum, sk, sk->sk_socket,
+                       sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+               filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+               MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+                       par->hooknum, filp ? filp->f_cred->fsuid : -1);
+       }
+
+       if (sk == NULL || sk->sk_socket == NULL) {
+               /*
+                * Here, the qtaguid_find_sk() using connection tracking
+                * couldn't find the owner, so for now we just count them
+                * against the system.
+                */
+               /*
+                * TODO: unhack how to force just accounting.
+                * For now we only do iface stats when the uid-owner is not
+                * requested.
+                */
+               if (!(info->match & XT_QTAGUID_UID))
+                       account_for_uid(skb, sk, 0, par);
+               MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+                       par->hooknum,
+                       sk ? sk->sk_socket : NULL);
+               res = (info->match ^ info->invert) == 0;
+               atomic64_inc(&qtu_events.match_no_sk);
+               goto put_sock_ret_res;
+       } else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+               res = false;
+               goto put_sock_ret_res;
+       }
+       filp = sk->sk_socket->file;
+       if (filp == NULL) {
+               MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+               account_for_uid(skb, sk, 0, par);
+               res = ((info->match ^ info->invert) &
+                       (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+               atomic64_inc(&qtu_events.match_no_sk_file);
+               goto put_sock_ret_res;
+       }
+       sock_uid = filp->f_cred->fsuid;
+       /*
+        * TODO: unhack how to force just accounting.
+        * For now we only do iface stats when the uid-owner is not requested
+        */
+       if (!(info->match & XT_QTAGUID_UID))
+               account_for_uid(skb, sk, sock_uid, par);
+
+       /*
+        * The following two tests fail the match when:
+        *    id not in range AND no inverted condition requested
+        * or id     in range AND    inverted condition requested
+        * Thus (!a && b) || (a && !b) == a ^ b
+        */
+       if (info->match & XT_QTAGUID_UID)
+               if ((filp->f_cred->fsuid >= info->uid_min &&
+                    filp->f_cred->fsuid <= info->uid_max) ^
+                   !(info->invert & XT_QTAGUID_UID)) {
+                       MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+                                par->hooknum);
+                       res = false;
+                       goto put_sock_ret_res;
+               }
+       if (info->match & XT_QTAGUID_GID)
+               if ((filp->f_cred->fsgid >= info->gid_min &&
+                               filp->f_cred->fsgid <= info->gid_max) ^
+                       !(info->invert & XT_QTAGUID_GID)) {
+                       MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+                               par->hooknum);
+                       res = false;
+                       goto put_sock_ret_res;
+               }
+
+       MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+       res = true;
+
+put_sock_ret_res:
+       if (got_sock)
+               xt_socket_put_sk(sk);
+ret_res:
+       MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+       return res;
+}
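+
+/*
+ * Note on qtaguid_mt() above, the match entry point:
+ *  - PRE/POST routing only updates raw iface stats and returns;
+ *  - otherwise resolve the owning socket (skb->sk, else a lookup via
+ *    qtaguid_find_sk());
+ *  - without a socket/file the traffic is accounted to uid 0 unless a
+ *    uid match was requested; then the uid/gid range checks decide the
+ *    verdict, honoring the invert flags.
+ */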
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of locks visibility */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+       va_list args;
+       char *fmt_buff;
+       char *buff;
+
+       if (likely(!(qtaguid_debug_mask & DDEBUG_MASK)))
+               return;
+
+       fmt_buff = kasprintf(GFP_ATOMIC,
+                            "qtaguid: %s(): %s {\n", __func__, fmt);
+       BUG_ON(!fmt_buff);
+       va_start(args, fmt);
+       buff = kvasprintf(GFP_ATOMIC,
+                         fmt_buff, args);
+       BUG_ON(!buff);
+       pr_debug("%s", buff);
+       kfree(fmt_buff);
+       kfree(buff);
+       va_end(args);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+       prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       spin_lock_bh(&iface_stat_list_lock);
+       prdebug_iface_stat_list(indent_level, &iface_stat_list);
+       spin_unlock_bh(&iface_stat_list_lock);
+
+       pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+struct proc_ctrl_print_info {
+       struct sock *sk; /* socket found by reading to sk_pos */
+       loff_t sk_pos;
+};
+
+static void *qtaguid_ctrl_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct proc_ctrl_print_info *pcpi = m->private;
+       struct sock_tag *sock_tag_entry = v;
+       struct rb_node *node;
+
+       (*pos)++;
+
+       if (!v || v == SEQ_START_TOKEN)
+               return NULL;
+
+       node = rb_next(&sock_tag_entry->sock_node);
+       if (!node) {
+               pcpi->sk = NULL;
+               sock_tag_entry = SEQ_START_TOKEN;
+       } else {
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               pcpi->sk = sock_tag_entry->sk;
+       }
+       pcpi->sk_pos = *pos;
+       return sock_tag_entry;
+}
+
+static void *qtaguid_ctrl_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_ctrl_print_info *pcpi = m->private;
+       struct sock_tag *sock_tag_entry;
+       struct rb_node *node;
+
+       spin_lock_bh(&sock_tag_list_lock);
+
+       if (unlikely(module_passive))
+               return NULL;
+
+       if (*pos == 0) {
+               pcpi->sk_pos = 0;
+               node = rb_first(&sock_tag_tree);
+               if (!node) {
+                       pcpi->sk = NULL;
+                       return SEQ_START_TOKEN;
+               }
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               pcpi->sk = sock_tag_entry->sk;
+       } else {
+               sock_tag_entry = (pcpi->sk ? get_sock_stat_nl(pcpi->sk) :
+                                               NULL) ?: SEQ_START_TOKEN;
+               if (*pos != pcpi->sk_pos) {
+                       /* seq_read skipped a next call */
+                       *pos = pcpi->sk_pos;
+                       return qtaguid_ctrl_proc_next(m, sock_tag_entry, pos);
+               }
+       }
+       return sock_tag_entry;
+}
+
+static void qtaguid_ctrl_proc_stop(struct seq_file *m, void *v)
+{
+       spin_unlock_bh(&sock_tag_list_lock);
+}
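+
+/*
+ * Note on qtaguid_ctrl_proc_start()/..._stop() above: start() takes
+ * sock_tag_list_lock and stop() releases it. seq_file always calls
+ * stop() after start(), even when start() returns NULL (e.g. when
+ * module_passive), so the lock cannot leak. pcpi->sk/sk_pos record where
+ * the last read stopped so a later start() can resume mid-tree.
+ */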
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
+{
+       struct sock_tag *sock_tag_entry = v;
+       uid_t uid;
+       long f_count;
+
+       CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, current_fsuid());
+
+       if (sock_tag_entry != SEQ_START_TOKEN) {
+               uid = get_uid_from_tag(sock_tag_entry->tag);
+               CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+                        "pid=%u\n",
+                        sock_tag_entry->sk,
+                        sock_tag_entry->tag,
+                        uid,
+                        sock_tag_entry->pid
+                       );
+               f_count = atomic_long_read(
+                       &sock_tag_entry->socket->file->f_count);
+               seq_printf(m, "sock=%p tag=0x%llx (uid=%u) pid=%u "
+                          "f_count=%lu\n",
+                          sock_tag_entry->sk,
+                          sock_tag_entry->tag, uid,
+                          sock_tag_entry->pid, f_count);
+       } else {
+               seq_printf(m, "events: sockets_tagged=%llu "
+                          "sockets_untagged=%llu "
+                          "counter_set_changes=%llu "
+                          "delete_cmds=%llu "
+                          "iface_events=%llu "
+                          "match_calls=%llu "
+                          "match_calls_prepost=%llu "
+                          "match_found_sk=%llu "
+                          "match_found_sk_in_ct=%llu "
+                          "match_found_no_sk_in_ct=%llu "
+                          "match_no_sk=%llu "
+                          "match_no_sk_file=%llu\n",
+                          (u64)atomic64_read(&qtu_events.sockets_tagged),
+                          (u64)atomic64_read(&qtu_events.sockets_untagged),
+                          (u64)atomic64_read(&qtu_events.counter_set_changes),
+                          (u64)atomic64_read(&qtu_events.delete_cmds),
+                          (u64)atomic64_read(&qtu_events.iface_events),
+                          (u64)atomic64_read(&qtu_events.match_calls),
+                          (u64)atomic64_read(&qtu_events.match_calls_prepost),
+                          (u64)atomic64_read(&qtu_events.match_found_sk),
+                          (u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
+                          (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
+                          (u64)atomic64_read(&qtu_events.match_no_sk),
+                          (u64)atomic64_read(&qtu_events.match_no_sk_file));
+
+               /* Count the following as part of the last item_index */
+               prdebug_full_state(0, "proc ctrl");
+       }
+
+       return 0;
+}
+
+/*
+ * Delete socket tags, and stat tags associated with a given
+ * accounting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+       char cmd;
+       uid_t uid;
+       uid_t entry_uid;
+       tag_t acct_tag;
+       tag_t tag;
+       int res, argc;
+       struct iface_stat *iface_entry;
+       struct rb_node *node;
+       struct sock_tag *st_entry;
+       struct rb_root st_to_free_tree = RB_ROOT;
+       struct tag_stat *ts_entry;
+       struct tag_counter_set *tcs_entry;
+       struct tag_ref *tr_entry;
+       struct uid_tag_data *utd_entry;
+
+       argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid);
+       CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+                "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+                acct_tag, uid);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       if (!valid_atag(acct_tag)) {
+               pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+               res = -EINVAL;
+               goto err;
+       }
+       if (argc < 3) {
+               uid = current_fsuid();
+       } else if (!can_impersonate_uid(uid)) {
+               pr_info("qtaguid: ctrl_delete(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, current_fsuid());
+               res = -EPERM;
+               goto err;
+       }
+
+       tag = combine_atag_with_uid(acct_tag, uid);
+       CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                "looking for tag=0x%llx (uid=%u)\n",
+                input, tag, uid);
+
+       /* Delete socket tags */
+       spin_lock_bh(&sock_tag_list_lock);
+       node = rb_first(&sock_tag_tree);
+       while (node) {
+               st_entry = rb_entry(node, struct sock_tag, sock_node);
+               entry_uid = get_uid_from_tag(st_entry->tag);
+               node = rb_next(node);
+               if (entry_uid != uid)
+                       continue;
+
+               CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+                        input, st_entry->tag, entry_uid);
+
+               if (!acct_tag || st_entry->tag == tag) {
+                       rb_erase(&st_entry->sock_node, &sock_tag_tree);
+                       /* Can't sockfd_put() within spinlock, do it later. */
+                       sock_tag_tree_insert(st_entry, &st_to_free_tree);
+                       tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+                       BUG_ON(tr_entry->num_sock_tags <= 0);
+                       tr_entry->num_sock_tags--;
+                       /*
+                        * TODO: remove if, and start failing.
+                        * This is a hack to work around the fact that in some
+                        * places we have "if (IS_ERR_OR_NULL(pqd_entry))"
+                        * and are trying to work around apps
+                        * that didn't open the /dev/xt_qtaguid.
+                        */
+                       if (st_entry->list.next && st_entry->list.prev)
+                               list_del(&st_entry->list);
+               }
+       }
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       sock_tag_tree_erase(&st_to_free_tree);
+
+       /* Delete tag counter-sets */
+       spin_lock_bh(&tag_counter_set_list_lock);
+       /* Counter sets are only on the uid tag, not full tag */
+       tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (tcs_entry) {
+               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                        "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+                        input,
+                        tcs_entry->tn.tag,
+                        get_uid_from_tag(tcs_entry->tn.tag),
+                        tcs_entry->active_set);
+               rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+               kfree(tcs_entry);
+       }
+       spin_unlock_bh(&tag_counter_set_list_lock);
+
+       /*
+        * If acct_tag is 0, then all entries belonging to uid are
+        * erased.
+        */
+       spin_lock_bh(&iface_stat_list_lock);
+       list_for_each_entry(iface_entry, &iface_stat_list, list) {
+               spin_lock_bh(&iface_entry->tag_stat_list_lock);
+               node = rb_first(&iface_entry->tag_stat_tree);
+               while (node) {
+                       ts_entry = rb_entry(node, struct tag_stat, tn.node);
+                       entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+                       node = rb_next(node);
+
+                       CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                                "ts tag=0x%llx (uid=%u)\n",
+                                input, ts_entry->tn.tag, entry_uid);
+
+                       if (entry_uid != uid)
+                               continue;
+                       if (!acct_tag || ts_entry->tn.tag == tag) {
+                               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                                        "erase ts: %s 0x%llx %u\n",
+                                        input, iface_entry->ifname,
+                                        get_atag_from_tag(ts_entry->tn.tag),
+                                        entry_uid);
+                               rb_erase(&ts_entry->tn.node,
+                                        &iface_entry->tag_stat_tree);
+                               kfree(ts_entry);
+                       }
+               }
+               spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+       }
+       spin_unlock_bh(&iface_stat_list_lock);
+
+       /* Cleanup the uid_tag_data */
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       node = rb_first(&uid_tag_data_tree);
+       while (node) {
+               utd_entry = rb_entry(node, struct uid_tag_data, node);
+               entry_uid = utd_entry->uid;
+               node = rb_next(node);
+
+               CT_DEBUG("qtaguid: ctrl_delete(%s): "
+                        "utd uid=%u\n",
+                        input, entry_uid);
+
+               if (entry_uid != uid)
+                       continue;
+               /*
+                * Go over the tag_refs, and those that don't have
+                * sock_tags using them are freed.
+                */
+               put_tag_ref_tree(tag, utd_entry);
+               put_utd_entry(utd_entry);
+       }
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+
+       atomic64_inc(&qtu_events.delete_cmds);
+       res = 0;
+
+err:
+       return res;
+}
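+
+/*
+ * Input format for ctrl_cmd_delete() above: "d <acct_tag> [<uid>]"
+ * (uid defaults to current_fsuid()). With acct_tag == 0 everything
+ * belonging to the uid is erased: socket tags, the counter set, all
+ * per-iface tag_stat entries, and finally the uid_tag_data bookkeeping.
+ */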
+
+static int ctrl_cmd_counter_set(const char *input)
+{
+       char cmd;
+       uid_t uid = 0;
+       tag_t tag;
+       int res, argc;
+       struct tag_counter_set *tcs;
+       int counter_set;
+
+       argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+       CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+                "set=%d uid=%u\n", input, argc, cmd,
+                counter_set, uid);
+       if (argc != 3) {
+               res = -EINVAL;
+               goto err;
+       }
+       if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+               pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+                       input);
+               res = -EINVAL;
+               goto err;
+       }
+       if (!can_manipulate_uids()) {
+               pr_info("qtaguid: ctrl_counterset(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, current_fsuid());
+               res = -EPERM;
+               goto err;
+       }
+
+       tag = make_tag_from_uid(uid);
+       spin_lock_bh(&tag_counter_set_list_lock);
+       tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+       if (!tcs) {
+               tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+               if (!tcs) {
+                       spin_unlock_bh(&tag_counter_set_list_lock);
+                       pr_err("qtaguid: ctrl_counterset(%s): "
+                              "failed to alloc counter set\n",
+                              input);
+                       res = -ENOMEM;
+                       goto err;
+               }
+               tcs->tn.tag = tag;
+               tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+               CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+                        "(uid=%u) set=%d\n",
+                        input, tag, get_uid_from_tag(tag), counter_set);
+       }
+       tcs->active_set = counter_set;
+       spin_unlock_bh(&tag_counter_set_list_lock);
+       atomic64_inc(&qtu_events.counter_set_changes);
+       res = 0;
+
+err:
+       return res;
+}
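+
+/*
+ * Input format for ctrl_cmd_counter_set() above: "s <counter_set> <uid>".
+ * The set is recorded against the plain uid tag, so it switches which
+ * counter-set slice all of that uid's tags update from now on.
+ */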
+
+static int ctrl_cmd_tag(const char *input)
+{
+       char cmd;
+       int sock_fd = 0;
+       uid_t uid = 0;
+       tag_t acct_tag = make_atag_from_value(0);
+       tag_t full_tag;
+       struct socket *el_socket;
+       int res, argc;
+       struct sock_tag *sock_tag_entry;
+       struct tag_ref *tag_ref_entry;
+       struct uid_tag_data *uid_tag_data_entry;
+       struct proc_qtu_data *pqd_entry;
+
+       /* Unassigned args will get defaulted later. */
+       argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid);
+       CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+                "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+                acct_tag, uid);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+       if (!el_socket) {
+               pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       current_fsuid());
+               goto err;
+       }
+       CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+                input, atomic_long_read(&el_socket->file->f_count),
+                el_socket->sk);
+       if (argc < 3) {
+               acct_tag = make_atag_from_value(0);
+       } else if (!valid_atag(acct_tag)) {
+               pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+               res = -EINVAL;
+               goto err_put;
+       }
+       CT_DEBUG("qtaguid: ctrl_tag(%s): "
+                "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+                "ctrl.gid=%u in_group()=%d in_egroup()=%d\n",
+                input, current->pid, current->tgid, current_uid(),
+                current_euid(), current_fsuid(),
+                xt_qtaguid_ctrl_file->gid,
+                in_group_p(xt_qtaguid_ctrl_file->gid),
+                in_egroup_p(xt_qtaguid_ctrl_file->gid));
+       if (argc < 4) {
+               uid = current_fsuid();
+       } else if (!can_impersonate_uid(uid)) {
+               pr_info("qtaguid: ctrl_tag(%s): "
+                       "insufficient priv from pid=%u tgid=%u uid=%u\n",
+                       input, current->pid, current->tgid, current_fsuid());
+               res = -EPERM;
+               goto err_put;
+       }
+       full_tag = combine_atag_with_uid(acct_tag, uid);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+       tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+       if (IS_ERR(tag_ref_entry)) {
+               res = PTR_ERR(tag_ref_entry);
+               spin_unlock_bh(&sock_tag_list_lock);
+               goto err_put;
+       }
+       tag_ref_entry->num_sock_tags++;
+       if (sock_tag_entry) {
+               struct tag_ref *prev_tag_ref_entry;
+
+               CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+                        "st@%p ...->f_count=%ld\n",
+                        input, el_socket->sk, sock_tag_entry,
+                        atomic_long_read(&el_socket->file->f_count));
+               /*
+                * This is a re-tagging, so release the sock_fd that was
+                * locked at the time of the 1st tagging.
+                * There is still the ref from this call's sockfd_lookup() so
+                * it can be done within the spinlock.
+                */
+               sockfd_put(sock_tag_entry->socket);
+               prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+                                                   &uid_tag_data_entry);
+               BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+               BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+               prev_tag_ref_entry->num_sock_tags--;
+               sock_tag_entry->tag = full_tag;
+       } else {
+               CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+                        input, el_socket->sk);
+               sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+                                        GFP_ATOMIC);
+               if (!sock_tag_entry) {
+                       pr_err("qtaguid: ctrl_tag(%s): "
+                              "socket tag alloc failed\n",
+                              input);
+                       spin_unlock_bh(&sock_tag_list_lock);
+                       res = -ENOMEM;
+                       goto err_tag_unref_put;
+               }
+               sock_tag_entry->sk = el_socket->sk;
+               sock_tag_entry->socket = el_socket;
+               sock_tag_entry->pid = current->tgid;
+               sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
+                                                           uid);
+               spin_lock_bh(&uid_tag_data_tree_lock);
+               pqd_entry = proc_qtu_data_tree_search(
+                       &proc_qtu_data_tree, current->tgid);
+               /*
+                * TODO: remove if, and start failing.
+                * At first, we want to catch user-space code that is not
+                * opening the /dev/xt_qtaguid.
+                */
+               if (IS_ERR_OR_NULL(pqd_entry))
+                       pr_warn_once(
+                               "qtaguid: %s(): "
+                               "User space forgot to open /dev/xt_qtaguid? "
+                               "pid=%u tgid=%u uid=%u\n", __func__,
+                               current->pid, current->tgid,
+                               current_fsuid());
+               else
+                       list_add(&sock_tag_entry->list,
+                                &pqd_entry->sock_tag_list);
+               spin_unlock_bh(&uid_tag_data_tree_lock);
+
+               sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+               atomic64_inc(&qtu_events.sockets_tagged);
+       }
+       spin_unlock_bh(&sock_tag_list_lock);
+       /* We keep the ref to the socket (file) until it is untagged */
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+                input, sock_tag_entry,
+                atomic_long_read(&el_socket->file->f_count));
+       return 0;
+
+err_tag_unref_put:
+       BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+       tag_ref_entry->num_sock_tags--;
+       free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+                input, atomic_long_read(&el_socket->file->f_count) - 1);
+       /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+       sockfd_put(el_socket);
+       return res;
+
+err:
+       CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+       return res;
+}
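+
+/*
+ * Input format for ctrl_cmd_tag() above: "t <sock_fd> [<acct_tag> [<uid>]]".
+ * The file reference taken by sockfd_lookup() is kept for the lifetime of
+ * the tag; re-tagging drops the previously held reference so exactly one
+ * is outstanding per tagged socket.
+ */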
+
+static int ctrl_cmd_untag(const char *input)
+{
+       char cmd;
+       int sock_fd = 0;
+       struct socket *el_socket;
+       int res, argc;
+       struct sock_tag *sock_tag_entry;
+       struct tag_ref *tag_ref_entry;
+       struct uid_tag_data *utd_entry;
+       struct proc_qtu_data *pqd_entry;
+
+       argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+       CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+                input, argc, cmd, sock_fd);
+       if (argc < 2) {
+               res = -EINVAL;
+               goto err;
+       }
+       el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+       if (!el_socket) {
+               pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+                       " sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+                       input, sock_fd, res, current->pid, current->tgid,
+                       current_fsuid());
+               goto err;
+       }
+       CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+                input, atomic_long_read(&el_socket->file->f_count),
+                el_socket->sk);
+       spin_lock_bh(&sock_tag_list_lock);
+       sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+       if (!sock_tag_entry) {
+               spin_unlock_bh(&sock_tag_list_lock);
+               res = -EINVAL;
+               goto err_put;
+       }
+       /*
+        * The socket already belongs to the current process
+        * so it can do whatever it wants to it.
+        */
+       rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+       tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+       BUG_ON(!tag_ref_entry);
+       BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+       pqd_entry = proc_qtu_data_tree_search(
+               &proc_qtu_data_tree, current->tgid);
+       /*
+        * TODO: remove if, and start failing.
+        * At first, we want to catch user-space code that is not
+        * opening the /dev/xt_qtaguid.
+        */
+       if (IS_ERR_OR_NULL(pqd_entry))
+               pr_warn_once("qtaguid: %s(): "
+                            "User space forgot to open /dev/xt_qtaguid? "
+                            "pid=%u tgid=%u uid=%u\n", __func__,
+                            current->pid, current->tgid, current_fsuid());
+       else
+               list_del(&sock_tag_entry->list);
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       /*
+        * We don't free tag_ref from the utd_entry here,
+        * only during a cmd_delete().
+        */
+       tag_ref_entry->num_sock_tags--;
+       spin_unlock_bh(&sock_tag_list_lock);
+       /*
+        * Release the sock_fd that was grabbed at tag time,
+        * and once more for the sockfd_lookup() here.
+        */
+       sockfd_put(sock_tag_entry->socket);
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+                input, sock_tag_entry,
+                atomic_long_read(&el_socket->file->f_count) - 1);
+       sockfd_put(el_socket);
+
+       kfree(sock_tag_entry);
+       atomic64_inc(&qtu_events.sockets_untagged);
+
+       return 0;
+
+err_put:
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+                input, atomic_long_read(&el_socket->file->f_count) - 1);
+       /* Release the sock_fd that was grabbed by sockfd_lookup(). */
+       sockfd_put(el_socket);
+       return res;
+
+err:
+       CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+       return res;
+}
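+
+/*
+ * Input format for ctrl_cmd_untag() above: "u <sock_fd>". Two references
+ * are dropped on success: the one held since tagging and the one just
+ * taken by this call's sockfd_lookup().
+ */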
+
+static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
+{
+       char cmd;
+       ssize_t res;
+
+       CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+                input, current->pid, current->tgid, current_fsuid());
+
+       cmd = input[0];
+       /* Collect params for commands */
+       switch (cmd) {
+       case 'd':
+               res = ctrl_cmd_delete(input);
+               break;
+
+       case 's':
+               res = ctrl_cmd_counter_set(input);
+               break;
+
+       case 't':
+               res = ctrl_cmd_tag(input);
+               break;
+
+       case 'u':
+               res = ctrl_cmd_untag(input);
+               break;
+
+       default:
+               res = -EINVAL;
+               goto err;
+       }
+       if (!res)
+               res = count;
+err:
+       CT_DEBUG("qtaguid: ctrl(%s): res=%zd\n", input, res);
+       return res;
+}
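+
+/*
+ * Illustrative ctrl usage (hypothetical fd/uid values; the ctrl file is
+ * the proc entry behind xt_qtaguid_ctrl_file, typically
+ * /proc/net/xt_qtaguid/ctrl):
+ *
+ *   "t 7 4294967296 10016"  tag socket fd 7 with acct_tag 1<<32, uid 10016
+ *   "s 1 10016"             switch uid 10016 to counter set 1
+ *   "u 7"                   untag socket fd 7
+ *   "d 0 10016"             delete all tags owned by uid 10016
+ */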
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static ssize_t qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+                                  size_t count, loff_t *offp)
+{
+       char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+       if (unlikely(module_passive))
+               return count;
+
+       if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+               return -EINVAL;
+
+       if (copy_from_user(input_buf, buffer, count))
+               return -EFAULT;
+
+       input_buf[count] = '\0';
+       return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+       struct iface_stat *iface_entry;
+       int item_index;
+       tag_t tag; /* tag found by reading to tag_pos */
+       off_t tag_pos;
+       int tag_item_index;
+};
+
+static void pp_stats_header(struct seq_file *m)
+{
+       seq_puts(m,
+                "idx iface acct_tag_hex uid_tag_int cnt_set "
+                "rx_bytes rx_packets "
+                "tx_bytes tx_packets "
+                "rx_tcp_bytes rx_tcp_packets "
+                "rx_udp_bytes rx_udp_packets "
+                "rx_other_bytes rx_other_packets "
+                "tx_tcp_bytes tx_tcp_packets "
+                "tx_udp_bytes tx_udp_packets "
+                "tx_other_bytes tx_other_packets\n");
+}
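+
+/*
+ * Illustrative stats line matching the header above (values made up):
+ *
+ *   2 wlan0 0x100000000 10016 0 1024 10 512 8 1024 10 0 0 0 0 512 8 0 0 0 0
+ *
+ * i.e. idx, iface, acct_tag in hex, uid, counter set, then total
+ * bytes/packets for rx and tx, followed by the tcp/udp/other splits.
+ */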
+
+static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry,
+                        int cnt_set)
+{
+       int ret;
+       struct data_counters *cnts;
+       tag_t tag = ts_entry->tn.tag;
+       uid_t stat_uid = get_uid_from_tag(tag);
+       struct proc_print_info *ppi = m->private;
+       /* Detailed tags are not available to everybody */
+       if (get_atag_from_tag(tag) && !can_read_other_uid_stats(stat_uid)) {
+               CT_DEBUG("qtaguid: stats line: "
+                        "%s 0x%llx %u: insufficient priv "
+                        "from pid=%u tgid=%u uid=%u stats.gid=%u\n",
+                        ppi->iface_entry->ifname,
+                        get_atag_from_tag(tag), stat_uid,
+                        current->pid, current->tgid, current_fsuid(),
+                        xt_qtaguid_stats_file->gid);
+               return 0;
+       }
+       ppi->item_index++;
+       cnts = &ts_entry->counters;
+       ret = seq_printf(m, "%d %s 0x%llx %u %u "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu "
+               "%llu %llu\n",
+               ppi->item_index,
+               ppi->iface_entry->ifname,
+               get_atag_from_tag(tag),
+               stat_uid,
+               cnt_set,
+               dc_sum_bytes(cnts, cnt_set, IFS_RX),
+               dc_sum_packets(cnts, cnt_set, IFS_RX),
+               dc_sum_bytes(cnts, cnt_set, IFS_TX),
+               dc_sum_packets(cnts, cnt_set, IFS_TX),
+               cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+               cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+               cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+               cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+               cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+               cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+       return ret ?: 1;
+}
+
+static bool pp_sets(struct seq_file *m, struct tag_stat *ts_entry)
+{
+       int ret;
+       int counter_set;
+       for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+            counter_set++) {
+               ret = pp_stats_line(m, ts_entry, counter_set);
+               if (ret < 0)
+                       return false;
+       }
+       return true;
+}
+
+static int qtaguid_stats_proc_iface_stat_ptr_valid(struct iface_stat *ptr)
+{
+       struct iface_stat *iface_entry;
+
+       if (!ptr)
+               return false;
+
+       list_for_each_entry(iface_entry, &iface_stat_list, list)
+               if (iface_entry == ptr)
+                       return true;
+       return false;
+}
+
+static void qtaguid_stats_proc_next_iface_entry(struct proc_print_info *ppi)
+{
+       spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+       list_for_each_entry_continue(ppi->iface_entry, &iface_stat_list, list) {
+               spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+               return;
+       }
+       ppi->iface_entry = NULL;
+}
+
+static void *qtaguid_stats_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct proc_print_info *ppi = m->private;
+       struct tag_stat *ts_entry;
+       struct rb_node *node;
+
+       if (!v) {
+               pr_err("qtaguid: %s(): unexpected v: NULL\n", __func__);
+               return NULL;
+       }
+
+       (*pos)++;
+
+       if (!ppi->iface_entry || unlikely(module_passive))
+               return NULL;
+
+       if (v == SEQ_START_TOKEN)
+               node = rb_first(&ppi->iface_entry->tag_stat_tree);
+       else
+               node = rb_next(&((struct tag_stat *)v)->tn.node);
+
+       while (!node) {
+               qtaguid_stats_proc_next_iface_entry(ppi);
+               if (!ppi->iface_entry)
+                       return NULL;
+               node = rb_first(&ppi->iface_entry->tag_stat_tree);
+       }
+
+       ts_entry = rb_entry(node, struct tag_stat, tn.node);
+       ppi->tag = ts_entry->tn.tag;
+       ppi->tag_pos = *pos;
+       ppi->tag_item_index = ppi->item_index;
+       return ts_entry;
+}
+
+static void *qtaguid_stats_proc_start(struct seq_file *m, loff_t *pos)
+{
+       struct proc_print_info *ppi = m->private;
+       struct tag_stat *ts_entry = NULL;
+
+       spin_lock_bh(&iface_stat_list_lock);
+
+       if (*pos == 0) {
+               ppi->item_index = 1;
+               ppi->tag_pos = 0;
+               if (list_empty(&iface_stat_list)) {
+                       ppi->iface_entry = NULL;
+               } else {
+                       ppi->iface_entry = list_first_entry(&iface_stat_list,
+                                                           struct iface_stat,
+                                                           list);
+                       spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+               }
+               return SEQ_START_TOKEN;
+       }
+       if (!qtaguid_stats_proc_iface_stat_ptr_valid(ppi->iface_entry)) {
+               if (ppi->iface_entry) {
+                       pr_err("qtaguid: %s(): iface_entry %p not found\n",
+                              __func__, ppi->iface_entry);
+                       ppi->iface_entry = NULL;
+               }
+               return NULL;
+       }
+
+       spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+
+       if (!ppi->tag_pos) {
+               /* seq_read skipped first next call */
+               ts_entry = SEQ_START_TOKEN;
+       } else {
+               ts_entry = tag_stat_tree_search(
+                               &ppi->iface_entry->tag_stat_tree, ppi->tag);
+               if (!ts_entry) {
+                       pr_info("qtaguid: %s(): tag_stat.tag 0x%llx not found. Abort.\n",
+                               __func__, ppi->tag);
+                       return NULL;
+               }
+       }
+
+       if (*pos == ppi->tag_pos) { /* normal resume */
+               ppi->item_index = ppi->tag_item_index;
+       } else {
+               /* seq_read skipped a next call */
+               *pos = ppi->tag_pos;
+               ts_entry = qtaguid_stats_proc_next(m, ts_entry, pos);
+       }
+
+       return ts_entry;
+}
+
+static void qtaguid_stats_proc_stop(struct seq_file *m, void *v)
+{
+       struct proc_print_info *ppi = m->private;
+       if (ppi->iface_entry)
+               spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+       spin_unlock_bh(&iface_stat_list_lock);
+}
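+
+/*
+ * Note on the stats seq_file traversal above: ppi->tag, tag_pos and
+ * tag_item_index snapshot where the last next() stopped so a later
+ * start() can re-search the rbtree and resume, even when seq_read
+ * restarts with a stale *pos. The per-iface tag_stat_list_lock is handed
+ * from one interface to the next in qtaguid_stats_proc_next_iface_entry().
+ */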
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups all protocols' tx/rx bytes.
+ */
+static int qtaguid_stats_proc_show(struct seq_file *m, void *v)
+{
+       struct tag_stat *ts_entry = v;
+
+       if (v == SEQ_START_TOKEN)
+               pp_stats_header(m);
+       else
+               pp_sets(m, ts_entry);
+
+       return 0;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+       struct uid_tag_data *utd_entry;
+       struct proc_qtu_data  *pqd_entry;
+       struct proc_qtu_data  *new_pqd_entry;
+       int res;
+       bool utd_entry_found;
+
+       if (unlikely(qtu_proc_handling_passive))
+               return 0;
+
+       DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+                current->pid, current->tgid, current_fsuid());
+
+       spin_lock_bh(&uid_tag_data_tree_lock);
+
+       /* Look for existing uid data, or alloc one. */
+       utd_entry = get_uid_data(current_fsuid(), &utd_entry_found);
+       if (IS_ERR_OR_NULL(utd_entry)) {
+               /* PTR_ERR(NULL) would be 0; make sure open still fails. */
+               res = utd_entry ? PTR_ERR(utd_entry) : -ENOMEM;
+               goto err_unlock;
+       }
+
+       /* Look for existing PID based proc_data */
+       pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+                                             current->tgid);
+       if (pqd_entry) {
+               pr_err("qtaguid: qtudev_open(): %u/%u %u "
+                      "%s already opened\n",
+                      current->pid, current->tgid, current_fsuid(),
+                      QTU_DEV_NAME);
+               res = -EBUSY;
+               goto err_unlock_free_utd;
+       }
+
+       new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+       if (!new_pqd_entry) {
+               pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+                      "proc data alloc failed\n",
+                      current->pid, current->tgid, current_fsuid());
+               res = -ENOMEM;
+               goto err_unlock_free_utd;
+       }
+       new_pqd_entry->pid = current->tgid;
+       INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+       new_pqd_entry->parent_tag_data = utd_entry;
+       utd_entry->num_pqd++;
+
+       proc_qtu_data_tree_insert(new_pqd_entry,
+                                 &proc_qtu_data_tree);
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+                current_fsuid(), new_pqd_entry);
+       file->private_data = new_pqd_entry;
+       return 0;
+
+err_unlock_free_utd:
+       if (!utd_entry_found) {
+               rb_erase(&utd_entry->node, &uid_tag_data_tree);
+               kfree(utd_entry);
+       }
+err_unlock:
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+       struct proc_qtu_data  *pqd_entry = file->private_data;
+       struct uid_tag_data  *utd_entry = pqd_entry->parent_tag_data;
+       struct sock_tag *st_entry;
+       struct rb_root st_to_free_tree = RB_ROOT;
+       struct list_head *entry, *next;
+       struct tag_ref *tr;
+
+       if (unlikely(qtu_proc_handling_passive))
+               return 0;
+
+       /*
+        * Do not trust current->pid here; it might just be a kworker
+        * cleaning up after a dead proc.
+        */
+       DR_DEBUG("qtaguid: qtudev_release(): "
+                "pid=%u tgid=%u uid=%u "
+                "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+                current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+                pqd_entry, pqd_entry->pid, utd_entry,
+                utd_entry->num_active_tags);
+
+       spin_lock_bh(&sock_tag_list_lock);
+       spin_lock_bh(&uid_tag_data_tree_lock);
+
+       list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+               st_entry = list_entry(entry, struct sock_tag, list);
+               DR_DEBUG("qtaguid: %s(): "
+                        "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+                        __func__,
+                        st_entry, st_entry->sk,
+                        current->pid, current->tgid,
+                        pqd_entry->parent_tag_data->uid);
+
+               utd_entry = uid_tag_data_tree_search(
+                       &uid_tag_data_tree,
+                       get_uid_from_tag(st_entry->tag));
+               BUG_ON(IS_ERR_OR_NULL(utd_entry));
+               DR_DEBUG("qtaguid: %s(): "
+                        "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+                        st_entry->tag, utd_entry);
+               tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+                                        st_entry->tag);
+               BUG_ON(!tr);
+               BUG_ON(tr->num_sock_tags <= 0);
+               tr->num_sock_tags--;
+               free_tag_ref_from_utd_entry(tr, utd_entry);
+
+               rb_erase(&st_entry->sock_node, &sock_tag_tree);
+               list_del(&st_entry->list);
+               /* Can't sockfd_put() within spinlock, do it later. */
+               sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+               /*
+                * Try to free the utd_entry if no other proc_qtu_data is
+                * using it (num_pqd is 0) and it doesn't have active tags
+                * (num_active_tags is 0).
+                */
+               put_utd_entry(utd_entry);
+       }
+
+       rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+       BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+       pqd_entry->parent_tag_data->num_pqd--;
+       put_utd_entry(pqd_entry->parent_tag_data);
+       kfree(pqd_entry);
+       file->private_data = NULL;
+
+       spin_unlock_bh(&uid_tag_data_tree_lock);
+       spin_unlock_bh(&sock_tag_list_lock);
+
+       sock_tag_tree_erase(&st_to_free_tree);
+
+       prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+                          current->pid, current->tgid);
+       return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+       .owner = THIS_MODULE,
+       .open = qtudev_open,
+       .release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = QTU_DEV_NAME,
+       .fops = &qtudev_fops,
+       /* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+static const struct seq_operations proc_qtaguid_ctrl_seqops = {
+       .start = qtaguid_ctrl_proc_start,
+       .next = qtaguid_ctrl_proc_next,
+       .stop = qtaguid_ctrl_proc_stop,
+       .show = qtaguid_ctrl_proc_show,
+};
+
+static int proc_qtaguid_ctrl_open(struct inode *inode, struct file *file)
+{
+       return seq_open_private(file, &proc_qtaguid_ctrl_seqops,
+                               sizeof(struct proc_ctrl_print_info));
+}
+
+static const struct file_operations proc_qtaguid_ctrl_fops = {
+       .open           = proc_qtaguid_ctrl_open,
+       .read           = seq_read,
+       .write          = qtaguid_ctrl_proc_write,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+static const struct seq_operations proc_qtaguid_stats_seqops = {
+       .start = qtaguid_stats_proc_start,
+       .next = qtaguid_stats_proc_next,
+       .stop = qtaguid_stats_proc_stop,
+       .show = qtaguid_stats_proc_show,
+};
+
+static int proc_qtaguid_stats_open(struct inode *inode, struct file *file)
+{
+       return seq_open_private(file, &proc_qtaguid_stats_seqops,
+                               sizeof(struct proc_print_info));
+}
+
+static const struct file_operations proc_qtaguid_stats_fops = {
+       .open           = proc_qtaguid_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release_private,
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+       int ret;
+       *res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+       if (!*res_procdir) {
+               pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+               ret = -ENOMEM;
+               goto no_dir;
+       }
+
+       xt_qtaguid_ctrl_file = proc_create_data("ctrl", proc_ctrl_perms,
+                                               *res_procdir,
+                                               &proc_qtaguid_ctrl_fops,
+                                               NULL);
+       if (!xt_qtaguid_ctrl_file) {
+               pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+                       " file\n");
+               ret = -ENOMEM;
+               goto no_ctrl_entry;
+       }
+
+       xt_qtaguid_stats_file = proc_create_data("stats", proc_stats_perms,
+                                                *res_procdir,
+                                                &proc_qtaguid_stats_fops,
+                                                NULL);
+       if (!xt_qtaguid_stats_file) {
+               pr_err("qtaguid: failed to create xt_qtaguid/stats "
+                       "file\n");
+               ret = -ENOMEM;
+               goto no_stats_entry;
+       }
+       /*
+        * TODO: add support for counter hacking
+        * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+        */
+       return 0;
+
+no_stats_entry:
+       remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+       remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+       return ret;
+}
+
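+/*
+ * Resulting layout (sketch; module_procdirname is expected to be
+ * "xt_qtaguid", as the error strings above suggest):
+ *   /proc/net/xt_qtaguid/ctrl   - tagging/untagging commands
+ *   /proc/net/xt_qtaguid/stats  - per-tag byte/packet counters
+ * Permissions come from proc_ctrl_perms/proc_stats_perms, defined
+ * elsewhere in this file.
+ */
+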
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+       /*
+        * This module masquerades as the "owner" module so that iptables
+        * tools can deal with it.
+        */
+       .name       = "owner",
+       .revision   = 1,
+       .family     = NFPROTO_UNSPEC,
+       .match      = qtaguid_mt,
+       .matchsize  = sizeof(struct xt_qtaguid_match_info),
+       .me         = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+       if (qtaguid_proc_register(&xt_qtaguid_procdir)
+           || iface_stat_init(xt_qtaguid_procdir)
+           || xt_register_match(&qtaguid_mt_reg)
+           || misc_register(&qtu_device))
+               return -1;
+       return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y' or 'n' and never 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644 (file)
index 0000000..6dc14a9
--- /dev/null
@@ -0,0 +1,395 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g. (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do {                           \
+               if (unlikely(qtaguid_debug_mask & (mask)))  \
+                       pr_debug(__VA_ARGS__);              \
+       } while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
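+
+/*
+ * Illustrative runtime use (assuming xt_qtaguid.c exposes the mask as a
+ * conventional "debug_mask" module parameter; the parameter itself is not
+ * declared in this header):
+ *   echo 0x19 > /sys/module/xt_qtaguid/parameters/debug_mask
+ * would enable IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK (0x01|0x08|0x10).
+ */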
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a:  {acct_tag=1, uid_tag=10003}
+ * b:  {acct_tag=2, uid_tag=10003}
+ * c:  {acct_tag=3, uid_tag=10003}
+ * d:  {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t;  /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+       return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+       return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+       return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+       return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+       return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+       return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+       return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+       return (uint64_t)value << 32;
+}
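+
+/*
+ * Worked example (illustrative only): tagging uid 10003 with acct_tag
+ * value 2 packs as
+ *   make_atag_from_value(2)            == 0x0000000200000000
+ *   combine_atag_with_uid(atag, 10003) == 0x0000000200002713
+ * and get_uid_from_tag()/get_atag_from_tag() recover the two halves.
+ */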
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
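+/*
+ * E.g. Android userspace switches a uid's active set to split foreground
+ * from background traffic (illustrative; the set semantics are entirely
+ * up to userspace).
+ */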
+
+enum ifs_tx_rx {
+       IFS_TX,
+       IFS_RX,
+       IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+       IFS_TCP,
+       IFS_UDP,
+       IFS_PROTO_OTHER,
+       IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+       uint64_t bytes;
+       uint64_t packets;
+};
+
+struct data_counters {
+       struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+                                   int set,
+                                   enum ifs_tx_rx direction)
+{
+       return counters->bpc[set][direction][IFS_TCP].bytes
+               + counters->bpc[set][direction][IFS_UDP].bytes
+               + counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+                                     int set,
+                                     enum ifs_tx_rx direction)
+{
+       return counters->bpc[set][direction][IFS_TCP].packets
+               + counters->bpc[set][direction][IFS_UDP].packets
+               + counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
+
+
+/* Generic tag-keyed node used as a base for rb_tree ops */
+struct tag_node {
+       struct rb_node node;
+       tag_t tag;
+};
+
+struct tag_stat {
+       struct tag_node tn;
+       struct data_counters counters;
+       /*
+        * If this tag is acct_tag based, we need to count against the
+        * matching parent uid_tag.
+        */
+       struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+       struct list_head list;  /* in iface_stat_list */
+       char *ifname;
+       bool active;
+       /* net_dev is only valid for active iface_stat */
+       struct net_device *net_dev;
+
+       struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+       struct data_counters totals_via_skb;
+       /*
+        * We keep the last_known, because some devices reset their counters
+        * just before NETDEV_UP, while some will reset just before
+        * NETDEV_REGISTER (which is more normal).
+        * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+        * its current dev stats smaller than what was previously known, we
+        * assume an UNREGISTER happened and just use the last_known.
+        */
+       struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+       /* last_known is usable when last_known_valid is true */
+       bool last_known_valid;
+
+       struct proc_dir_entry *proc_ptr;
+
+       struct rb_root tag_stat_tree;
+       spinlock_t tag_stat_list_lock;
+};
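+
+/*
+ * Sketch of the recovery rule described above (illustrative only; the
+ * real handling lives in xt_qtaguid.c, not in this header):
+ *
+ *   if (is->last_known_valid && cur_rx_bytes < prev_seen_rx_bytes)
+ *           // Counters went backwards with no NETDEV_UNREGISTER seen:
+ *           // treat it as a hidden reset and fold in the last_known total.
+ *           rx_total = is->last_known[IFS_RX].bytes + cur_rx_bytes;
+ */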
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+       struct work_struct iface_work;
+       struct iface_stat *iface_entry;
+};
+
+/*
+ * Tracks the tag that this socket is transferring data for, which is not
+ * necessarily the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+       struct rb_node sock_node;
+       struct sock *sk;  /* Only used as a number, never dereferenced */
+       /* The socket is needed for sockfd_put() */
+       struct socket *socket;
+       /* Used to associate with a given pid */
+       struct list_head list;   /* in proc_qtu_data.sock_tag_list */
+       pid_t pid;
+
+       tag_t tag;
+};
+
+struct qtaguid_event_counts {
+       /* Various successful events */
+       atomic64_t sockets_tagged;
+       atomic64_t sockets_untagged;
+       atomic64_t counter_set_changes;
+       atomic64_t delete_cmds;
+       atomic64_t iface_events;  /* Number of NETDEV_* events handled */
+
+       atomic64_t match_calls;   /* Number of times iptables called mt */
+       /* Number of times iptables called mt from pre or post routing hooks */
+       atomic64_t match_calls_prepost;
+       /*
+        * match_found_sk_*: numbers related to the netfilter matching
+        * function finding a sock for the sk_buff.
+        * Total skbs processed is sum(match_found*).
+        */
+       atomic64_t match_found_sk;   /* An sk was already in the sk_buff. */
+       /* The connection tracker had or didn't have the sk. */
+       atomic64_t match_found_sk_in_ct;
+       atomic64_t match_found_no_sk_in_ct;
+       /*
+        * No sk could be found. No apparent owner. Could happen with
+        * unsolicited traffic.
+        */
+       atomic64_t match_no_sk;
+       /*
+        * The file ptr in the sk_socket wasn't there.
+        * This might happen for traffic while the socket is being closed.
+        */
+       atomic64_t match_no_sk_file;
+};
+
+/* Tracks which counter set is active (active_set) for the given tag. */
+struct tag_counter_set {
+       struct tag_node tn;
+       int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources are counted to prevent further rogue allocations,
+ * some will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+       struct rb_node node;
+       uid_t uid;
+
+       /*
+        * For the uid, how many accounting tags have been set.
+        */
+       int num_active_tags;
+       /* Track the number of proc_qtu_data that reference it */
+       int num_pqd;
+       struct rb_root tag_ref_tree;
+       /* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+       struct tag_node tn;
+
+       /*
+        * This tracks the number of active sockets that have a tag on them
+        * which matches this tag_ref.tn.tag.
+        * A tag ref can live on after the sockets are untagged.
+        * A tag ref can only be removed during a tag delete command.
+        */
+       int num_sock_tags;
+};
+
+struct proc_qtu_data {
+       struct rb_node node;
+       pid_t pid;
+
+       struct uid_tag_data *parent_tag_data;
+
+       /* Tracks the sock_tags that need freeing upon this proc's death */
+       struct list_head sock_tag_list;
+       /* No spinlock_t sock_tag_list_lock; use the global one. */
+};
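+
+/*
+ * Ownership sketch (illustrative):
+ *
+ *   proc_qtu_data(pid) ---> uid_tag_data(uid) ---> tag_ref_tree --> tag_ref
+ *          |                                                          ^
+ *          +--- sock_tag_list: sock_tag(sk, tag) ---------------------+
+ *
+ * Each tracked process shares the per-uid uid_tag_data, which owns one
+ * tag_ref per distinct tag; each sock_tag on the process contributes to
+ * the matching tag_ref.num_sock_tags.
+ */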
+
+/*----------------------------------------------*/
+#endif  /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644 (file)
index 0000000..f6a00a3
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+       if (IS_ERR_OR_NULL(ptr)) {
+               pr_err("qtaguid: kmalloc failed\n");
+               BUG();
+       }
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+       char *res;
+
+       if (!tag)
+               res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+       else
+               res = kasprintf(GFP_ATOMIC,
+                               "tag_t@%p{tag=0x%llx, uid=%u}",
+                               tag, *tag, get_uid_from_tag(*tag));
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+       char *res;
+
+       if (!dc)
+               res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+       else if (showValues)
+               res = kasprintf(
+                       GFP_ATOMIC, "data_counters@%p{"
+                       "set0{"
+                       "rx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}, "
+                       "tx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}}, "
+                       "set1{"
+                       "rx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}, "
+                       "tx{"
+                       "tcp{b=%llu, p=%llu}, "
+                       "udp{b=%llu, p=%llu},"
+                       "other{b=%llu, p=%llu}}}}",
+                       dc,
+                       dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+                       dc->bpc[0][IFS_RX][IFS_TCP].packets,
+                       dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+                       dc->bpc[0][IFS_RX][IFS_UDP].packets,
+                       dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+                       dc->bpc[0][IFS_TX][IFS_TCP].packets,
+                       dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+                       dc->bpc[0][IFS_TX][IFS_UDP].packets,
+                       dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+                       dc->bpc[1][IFS_RX][IFS_TCP].packets,
+                       dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+                       dc->bpc[1][IFS_RX][IFS_UDP].packets,
+                       dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+                       dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+                       dc->bpc[1][IFS_TX][IFS_TCP].packets,
+                       dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+                       dc->bpc[1][IFS_TX][IFS_UDP].packets,
+                       dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+                       dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+       else
+               res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+       char *tag_str;
+       char *res;
+
+       if (!tn) {
+               res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tag_str = pp_tag_t(&tn->tag);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_node@%p{tag=%s}",
+                       tn, tag_str);
+       _bug_on_err_or_null(res);
+       kfree(tag_str);
+       return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+       char *tn_str;
+       char *res;
+
+       if (!tr) {
+               res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tn_str = pp_tag_node(&tr->tn);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_ref@%p{%s, num_sock_tags=%d}",
+                       tr, tn_str, tr->num_sock_tags);
+       _bug_on_err_or_null(res);
+       kfree(tn_str);
+       return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+       char *tn_str;
+       char *counters_str;
+       char *parent_counters_str;
+       char *res;
+
+       if (!ts) {
+               res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tn_str = pp_tag_node(&ts->tn);
+       counters_str = pp_data_counters(&ts->counters, true);
+       parent_counters_str = pp_data_counters(ts->parent_counters, false);
+       res = kasprintf(GFP_ATOMIC,
+                       "tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+                       ts, tn_str, counters_str, parent_counters_str);
+       _bug_on_err_or_null(res);
+       kfree(tn_str);
+       kfree(counters_str);
+       kfree(parent_counters_str);
+       return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+       char *res;
+       if (!is) {
+               res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+       } else {
+               struct data_counters *cnts = &is->totals_via_skb;
+               res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+                               "list=list_head{...}, "
+                               "ifname=%s, "
+                               "total_dev={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "total_skb={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "last_known_valid=%d, "
+                               "last_known={rx={bytes=%llu, "
+                               "packets=%llu}, "
+                               "tx={bytes=%llu, "
+                               "packets=%llu}}, "
+                               "active=%d, "
+                               "net_dev=%p, "
+                               "proc_ptr=%p, "
+                               "tag_stat_tree=rb_root{...}}",
+                               is,
+                               is->ifname,
+                               is->totals_via_dev[IFS_RX].bytes,
+                               is->totals_via_dev[IFS_RX].packets,
+                               is->totals_via_dev[IFS_TX].bytes,
+                               is->totals_via_dev[IFS_TX].packets,
+                               dc_sum_bytes(cnts, 0, IFS_RX),
+                               dc_sum_packets(cnts, 0, IFS_RX),
+                               dc_sum_bytes(cnts, 0, IFS_TX),
+                               dc_sum_packets(cnts, 0, IFS_TX),
+                               is->last_known_valid,
+                               is->last_known[IFS_RX].bytes,
+                               is->last_known[IFS_RX].packets,
+                               is->last_known[IFS_TX].bytes,
+                               is->last_known[IFS_TX].packets,
+                               is->active,
+                               is->net_dev,
+                               is->proc_ptr);
+       }
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+       char *tag_str;
+       char *res;
+
+       if (!st) {
+               res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       tag_str = pp_tag_t(&st->tag);
+       res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+                       "sock_node=rb_node{...}, "
+                       "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+                       "pid=%u, tag=%s}",
+                       st, st->sk, st->socket, atomic_long_read(
+                               &st->socket->file->f_count),
+                       st->pid, tag_str);
+       _bug_on_err_or_null(res);
+       kfree(tag_str);
+       return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+       char *res;
+
+       if (!utd)
+               res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+       else
+               res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+                               "uid=%u, num_active_acct_tags=%d, "
+                               "num_pqd=%d, "
+                               "tag_node_tree=rb_root{...}, "
+                               "proc_qtu_data_tree=rb_root{...}}",
+                               utd, utd->uid,
+                               utd->num_active_tags, utd->num_pqd);
+       _bug_on_err_or_null(res);
+       return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+       char *parent_tag_data_str;
+       char *res;
+
+       if (!pqd) {
+               res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+               _bug_on_err_or_null(res);
+               return res;
+       }
+       parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+       res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+                       "node=rb_node{...}, pid=%u, "
+                       "parent_tag_data=%s, "
+                       "sock_tag_list=list_head{...}}",
+                       pqd, pqd->pid, parent_tag_data_str
+               );
+       _bug_on_err_or_null(res);
+       kfree(parent_tag_data_str);
+       return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree)
+{
+       struct rb_node *node;
+       struct sock_tag *sock_tag_entry;
+       char *str;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(sock_tag_tree)) {
+               str = "sock_tag_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "sock_tag_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(sock_tag_tree);
+            node;
+            node = rb_next(node)) {
+               sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+               str = pp_sock_tag(sock_tag_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list)
+{
+       struct sock_tag *sock_tag_entry;
+       char *str;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (list_empty(sock_tag_list)) {
+               str = "sock_tag_list=list_head{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "sock_tag_list=list_head{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+               str = pp_sock_tag(sock_tag_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct proc_qtu_data *proc_qtu_data_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+               str = "proc_qtu_data_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "proc_qtu_data_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(proc_qtu_data_tree);
+            node;
+            node = rb_next(node)) {
+               proc_qtu_data_entry = rb_entry(node,
+                                              struct proc_qtu_data,
+                                              node);
+               str = pp_proc_qtu_data(proc_qtu_data_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+               indent_level++;
+               prdebug_sock_tag_list(indent_level,
+                                     &proc_qtu_data_entry->sock_tag_list);
+               indent_level--;
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct tag_ref *tag_ref_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(tag_ref_tree)) {
+               str = "tag_ref_tree{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "tag_ref_tree{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(tag_ref_tree);
+            node;
+            node = rb_next(node)) {
+               tag_ref_entry = rb_entry(node,
+                                        struct tag_ref,
+                                        tn.node);
+               str = pp_tag_ref(tag_ref_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct uid_tag_data *uid_tag_data_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+               str = "uid_tag_data_tree=rb_root{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "uid_tag_data_tree=rb_root{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(uid_tag_data_tree);
+            node;
+            node = rb_next(node)) {
+               uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+                                             node);
+               str = pp_uid_tag_data(uid_tag_data_entry);
+               pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+               kfree(str);
+               if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+                       indent_level++;
+                       prdebug_tag_ref_tree(indent_level,
+                                            &uid_tag_data_entry->tag_ref_tree);
+                       indent_level--;
+               }
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+                                 struct rb_root *tag_stat_tree)
+{
+       char *str;
+       struct rb_node *node;
+       struct tag_stat *ts_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (RB_EMPTY_ROOT(tag_stat_tree)) {
+               str = "tag_stat_tree{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "tag_stat_tree{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       for (node = rb_first(tag_stat_tree);
+            node;
+            node = rb_next(node)) {
+               ts_entry = rb_entry(node, struct tag_stat, tn.node);
+               str = pp_tag_stat(ts_entry);
+               pr_debug("%*d: %s\n", indent_level*2, indent_level,
+                        str);
+               kfree(str);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list)
+{
+       char *str;
+       struct iface_stat *iface_entry;
+
+       if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+               return;
+
+       if (list_empty(iface_stat_list)) {
+               str = "iface_stat_list=list_head{}";
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               return;
+       }
+
+       str = "iface_stat_list=list_head{";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+       indent_level++;
+       list_for_each_entry(iface_entry, iface_stat_list, list) {
+               str = pp_iface_stat(iface_entry);
+               pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+               kfree(str);
+
+               spin_lock_bh(&iface_entry->tag_stat_list_lock);
+               if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+                       indent_level++;
+                       prdebug_tag_stat_tree(indent_level,
+                                             &iface_entry->tag_stat_tree);
+                       indent_level--;
+               }
+               spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+       }
+       indent_level--;
+       str = "}";
+       pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif  /* ifdef DDEBUG */
+/*------------------------------------------*/
+static const char * const netdev_event_strings[] = {
+       "netdev_unknown",
+       "NETDEV_UP",
+       "NETDEV_DOWN",
+       "NETDEV_REBOOT",
+       "NETDEV_CHANGE",
+       "NETDEV_REGISTER",
+       "NETDEV_UNREGISTER",
+       "NETDEV_CHANGEMTU",
+       "NETDEV_CHANGEADDR",
+       "NETDEV_GOING_DOWN",
+       "NETDEV_CHANGENAME",
+       "NETDEV_FEAT_CHANGE",
+       "NETDEV_BONDING_FAILOVER",
+       "NETDEV_PRE_UP",
+       "NETDEV_PRE_TYPE_CHANGE",
+       "NETDEV_POST_TYPE_CHANGE",
+       "NETDEV_POST_INIT",
+       "NETDEV_UNREGISTER_BATCH",
+       "NETDEV_RELEASE",
+       "NETDEV_NOTIFY_PEERS",
+       "NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+       if (netdev_event < 0
+           || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+               return "bad event num";
+       return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644 (file)
index 0000000..b63871a
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+                          struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+       return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+       return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+       return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+       return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+       return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+       return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+       return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+       return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+       return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+                          struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+                          struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+                               struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+                              struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+                          struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+                            struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif  /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644 (file)
index 0000000..4328562
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ *     netfilter module to enforce network quotas
+ *     Sam Johnston <samj@samj.net>
+ *
+ *     This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License; either
+ *     version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+#include <linux/netfilter_ipv4/ipt_ULOG.h>
+#endif
+
+/**
+ * struct xt_quota_counter - quota state, possibly shared by name
+ * @lock:      lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+       u_int64_t quota;
+       spinlock_t lock;
+       struct list_head list;
+       atomic_t ref;
+       char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+       struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.c */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+                "Event number for NETLINK_NFLOG message. 0 disables log."
+                "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static unsigned int quota_list_uid   = 0;
+static unsigned int quota_list_gid   = 0;
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+module_param_named(uid, quota_list_uid, uint, S_IRUGO | S_IWUSR);
+module_param_named(gid, quota_list_gid, uint, S_IRUGO | S_IWUSR);
+
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+                      const struct sk_buff *skb,
+                      const struct net_device *in,
+                      const struct net_device *out,
+                      const char *prefix)
+{
+       ulog_packet_msg_t *pm;
+       struct sk_buff *log_skb;
+       size_t size;
+       struct nlmsghdr *nlh;
+
+       if (!qlog_nl_event)
+               return;
+
+       size = NLMSG_SPACE(sizeof(*pm));
+       size = max(size, (size_t)NLMSG_GOODSIZE);
+       log_skb = alloc_skb(size, GFP_ATOMIC);
+       if (!log_skb) {
+               pr_err("xt_quota2: cannot alloc skb for logging\n");
+               return;
+       }
+
+       nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+                       sizeof(*pm), 0);
+       if (!nlh) {
+               pr_err("xt_quota2: nlmsg_put failed\n");
+               kfree_skb(log_skb);
+               return;
+       }
+       pm = nlmsg_data(nlh);
+       if (skb->tstamp.tv64 == 0)
+               __net_timestamp((struct sk_buff *)skb);
+       pm->data_len = 0;
+       pm->hook = hooknum;
+       if (prefix != NULL)
+               strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+       else
+               *(pm->prefix) = '\0';
+       if (in)
+               strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+       else
+               pm->indev_name[0] = '\0';
+
+       if (out)
+               strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+       else
+               pm->outdev_name[0] = '\0';
+
+       NETLINK_CB(log_skb).dst_group = 1;
+       pr_debug("throwing 1 packets to netlink group 1\n");
+       netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+                      const struct sk_buff *skb,
+                      const struct net_device *in,
+                      const struct net_device *out,
+                      const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+                          size_t size, loff_t *ppos)
+{
+       struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+       char tmp[24];
+       size_t tmp_size;
+
+       spin_lock_bh(&e->lock);
+       tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+       spin_unlock_bh(&e->lock);
+       return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+                            size_t size, loff_t *ppos)
+{
+       struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+       char buf[sizeof("18446744073709551616")];
+
+       if (size > sizeof(buf) - 1)
+               size = sizeof(buf) - 1;
+       if (copy_from_user(buf, input, size) != 0)
+               return -EFAULT;
+       /* Terminate right after the copied bytes so stale stack data can
+        * never be parsed as part of the number. */
+       buf[size] = '\0';
+
+       spin_lock_bh(&e->lock);
+       e->quota = simple_strtoull(buf, NULL, 0);
+       spin_unlock_bh(&e->lock);
+       return size;
+}
+
+static const struct file_operations q2_counter_fops = {
+       .read           = quota_proc_read,
+       .write          = quota_proc_write,
+       .llseek         = default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+       struct xt_quota_counter *e;
+       unsigned int size;
+
+       /* Do not need all the procfs things for anonymous counters. */
+       size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+       e = kmalloc(size, GFP_KERNEL);
+       if (e == NULL)
+               return NULL;
+
+       e->quota = q->quota;
+       spin_lock_init(&e->lock);
+       if (!anon) {
+               INIT_LIST_HEAD(&e->list);
+               atomic_set(&e->ref, 1);
+               strlcpy(e->name, q->name, sizeof(e->name));
+       }
+       return e;
+}
+
+/**
+ * q2_get_counter - get a ref to a named counter, or create a new one
+ * @q: match info holding the counter name and initial quota
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+       struct proc_dir_entry *p;
+       struct xt_quota_counter *e = NULL;
+       struct xt_quota_counter *new_e;
+
+       if (*q->name == '\0')
+               return q2_new_counter(q, true);
+
+       /* No need to hold a lock while getting a new counter */
+       new_e = q2_new_counter(q, false);
+       if (new_e == NULL)
+               goto out;
+
+       spin_lock_bh(&counter_list_lock);
+       list_for_each_entry(e, &counter_list, list)
+               if (strcmp(e->name, q->name) == 0) {
+                       atomic_inc(&e->ref);
+                       spin_unlock_bh(&counter_list_lock);
+                       kfree(new_e);
+                       pr_debug("xt_quota2: old counter name=%s", e->name);
+                       return e;
+               }
+       e = new_e;
+       pr_debug("xt_quota2: new_counter name=%s", e->name);
+       list_add_tail(&e->list, &counter_list);
+       /* The entry having a refcount of 1 is not directly destructible.
+        * This func has not yet returned the new entry, thus iptables
+        * holds no reference it could use to destroy this entry.
+        * For another rule to destroy it, this func would first have to be
+        * re-invoked to acquire a new ref to the same named quota.
+        * Nobody will access e->procfs_entry either.
+        * So it is safe to release the lock. */
+       spin_unlock_bh(&counter_list_lock);
+
+       /* proc_create_data() is not spin_lock happy */
+       p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+                             proc_xt_quota, &q2_counter_fops, e);
+
+       if (IS_ERR_OR_NULL(p)) {
+               spin_lock_bh(&counter_list_lock);
+               list_del(&e->list);
+               spin_unlock_bh(&counter_list_lock);
+               goto out;
+       }
+       proc_set_user(p, quota_list_uid, quota_list_gid);
+       return e;
+
+ out:
+       kfree(e);
+       return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+       struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+       pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+       if (q->flags & ~XT_QUOTA_MASK)
+               return -EINVAL;
+
+       q->name[sizeof(q->name)-1] = '\0';
+       if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+               printk(KERN_ERR "xt_quota.3: illegal name\n");
+               return -EINVAL;
+       }
+
+       q->master = q2_get_counter(q);
+       if (q->master == NULL) {
+               printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_quota_mtinfo2 *q = par->matchinfo;
+       struct xt_quota_counter *e = q->master;
+
+       if (*q->name == '\0') {
+               kfree(e);
+               return;
+       }
+
+       spin_lock_bh(&counter_list_lock);
+       if (!atomic_dec_and_test(&e->ref)) {
+               spin_unlock_bh(&counter_list_lock);
+               return;
+       }
+
+       list_del(&e->list);
+       remove_proc_entry(e->name, proc_xt_quota);
+       spin_unlock_bh(&counter_list_lock);
+       kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+       struct xt_quota_counter *e = q->master;
+       bool ret = q->flags & XT_QUOTA_INVERT;
+
+       spin_lock_bh(&e->lock);
+       if (q->flags & XT_QUOTA_GROW) {
+               /*
+                * While no_change is pointless in "grow" mode, we will
+                * implement it here simply to have a consistent behavior.
+                */
+               if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+                       e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+               }
+               ret = true;
+       } else {
+               if (e->quota >= skb->len) {
+                       if (!(q->flags & XT_QUOTA_NO_CHANGE))
+                               e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+                       ret = !ret;
+               } else {
+                       /* We are transitioning, log that fact. */
+                       if (e->quota) {
+                               quota2_log(par->hooknum,
+                                          skb,
+                                          par->in,
+                                          par->out,
+                                          q->name);
+                       }
+                       /* we do not allow even small packets from now on */
+                       e->quota = 0;
+               }
+       }
+       spin_unlock_bh(&e->lock);
+       return ret;
+}
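+
+/*
+ * Illustrative userspace usage (assumes the matching iptables "quota2"
+ * extension; option names live in userspace, not in this file):
+ *   iptables -A OUTPUT -m quota2 --name data_cap --quota 1000000 -j ACCEPT
+ * The remaining byte budget then appears in /proc/net/xt_quota/data_cap
+ * and can be rewritten at any time via quota_proc_write() above.
+ */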
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+       {
+               .name       = "quota2",
+               .revision   = 3,
+               .family     = NFPROTO_IPV4,
+               .checkentry = quota_mt2_check,
+               .match      = quota_mt2,
+               .destroy    = quota_mt2_destroy,
+               .matchsize  = sizeof(struct xt_quota_mtinfo2),
+               .me         = THIS_MODULE,
+       },
+       {
+               .name       = "quota2",
+               .revision   = 3,
+               .family     = NFPROTO_IPV6,
+               .checkentry = quota_mt2_check,
+               .match      = quota_mt2,
+               .destroy    = quota_mt2_destroy,
+               .matchsize  = sizeof(struct xt_quota_mtinfo2),
+               .me         = THIS_MODULE,
+       },
+};
+
+static int __init quota_mt2_init(void)
+{
+       int ret;
+       pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+       nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+       if (!nflognl)
+               return -ENOMEM;
+#endif
+
+       proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+       if (proc_xt_quota == NULL) {
+               ret = -EACCES;
+               goto err_netlink;
+       }
+
+       ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+       pr_debug("xt_quota2: init() %d\n", ret);
+       if (ret < 0) {
+               remove_proc_entry("xt_quota", init_net.proc_net);
+               goto err_netlink;
+       }
+       return 0;
+
+ err_netlink:
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+       netlink_kernel_release(nflognl);
+#endif
+       return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+       xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+       remove_proc_entry("xt_quota", init_net.proc_net);
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+       netlink_kernel_release(nflognl);
+#endif
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
index 63b2bdb59e955fd012de3787f9e52279169fe4bc..030f33cd9ee9b2395a5669257678dbe1c5688535 100644 (file)
@@ -35,7 +35,7 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
-static void
+void
 xt_socket_put_sk(struct sock *sk)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
@@ -43,6 +43,7 @@ xt_socket_put_sk(struct sock *sk)
        else
                sock_put(sk);
 }
+EXPORT_SYMBOL(xt_socket_put_sk);
 
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
@@ -101,9 +102,8 @@ extract_icmp4_fields(const struct sk_buff *skb,
        return 0;
 }
 
-static bool
-socket_match(const struct sk_buff *skb, struct xt_action_param *par,
-            const struct xt_socket_mtinfo1 *info)
+struct sock *
+xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
@@ -120,7 +120,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                hp = skb_header_pointer(skb, ip_hdrlen(skb),
                                        sizeof(_hdr), &_hdr);
                if (hp == NULL)
-                       return false;
+                       return NULL;
 
                protocol = iph->protocol;
                saddr = iph->saddr;
@@ -131,9 +131,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        } else if (iph->protocol == IPPROTO_ICMP) {
                if (extract_icmp4_fields(skb, &protocol, &saddr, &daddr,
                                        &sport, &dport))
-                       return false;
+                       return NULL;
        } else {
-               return false;
+               return NULL;
        }
 
 #ifdef XT_SOCKET_HAVE_CONNTRACK
@@ -157,6 +157,23 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 
        sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
                                   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+
+       pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
+                protocol, &saddr, ntohs(sport),
+                &daddr, ntohs(dport),
+                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+
+       return sk;
+}
+EXPORT_SYMBOL(xt_socket_get4_sk);
+
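+/*
+ * Wrapper preserving the original match semantics: the exported
+ * xt_socket_get4_sk() above only performs the socket lookup, while the
+ * match itself still applies the wildcard/transparent policy checks.
+ */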
+static bool
+socket_match(const struct sk_buff *skb, struct xt_action_param *par,
+            const struct xt_socket_mtinfo1 *info)
+{
+       struct sock *sk;
+
+       sk = xt_socket_get4_sk(skb, par);
        if (sk != NULL) {
                bool wildcard;
                bool transparent = true;
@@ -179,11 +196,6 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
                        sk = NULL;
        }
 
-       pr_debug("proto %hhu %pI4:%hu -> %pI4:%hu (orig %pI4:%hu) sock %p\n",
-                protocol, &saddr, ntohs(sport),
-                &daddr, ntohs(dport),
-                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
        return (sk != NULL);
 }
 
@@ -255,8 +267,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
        return 0;
 }
 
-static bool
-socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+struct sock *
+xt_socket_get6_sk(const struct sk_buff *skb, struct xt_action_param *par)
 {
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct udphdr _hdr, *hp = NULL;
@@ -264,7 +276,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
        struct in6_addr *daddr = NULL, *saddr = NULL;
        __be16 uninitialized_var(dport), uninitialized_var(sport);
        int thoff = 0, uninitialized_var(tproto);
-       const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
        tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
        if (tproto < 0) {
@@ -276,7 +287,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                hp = skb_header_pointer(skb, thoff,
                                        sizeof(_hdr), &_hdr);
                if (hp == NULL)
-                       return false;
+                       return NULL;
 
                saddr = &iph->saddr;
                sport = hp->source;
@@ -286,13 +297,30 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
        } else if (tproto == IPPROTO_ICMPV6) {
                if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
                                         &sport, &dport))
-                       return false;
+                       return NULL;
        } else {
-               return false;
+               return NULL;
        }
 
        sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
                                   saddr, daddr, sport, dport, par->in, NFT_LOOKUP_ANY);
+       pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
+                "(orig %pI6:%hu) sock %p\n",
+                tproto, saddr, ntohs(sport),
+                daddr, ntohs(dport),
+                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
+       return sk;
+}
+EXPORT_SYMBOL(xt_socket_get6_sk);
+
+static bool
+socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       struct sock *sk;
+       const struct xt_socket_mtinfo1 *info;
+
+       info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+       sk = xt_socket_get6_sk(skb, par);
        if (sk != NULL) {
                bool wildcard;
                bool transparent = true;
@@ -315,12 +343,6 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
                        sk = NULL;
        }
 
-       pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
-                "(orig %pI6:%hu) sock %p\n",
-                tproto, saddr, ntohs(sport),
-                daddr, ntohs(dport),
-                &iph->daddr, hp ? ntohs(hp->dest) : 0, sk);
-
        return (sk != NULL);
 }
 #endif
index 78efe895b6636c4a57af6d72f156501e2c778992..8e12c8a2b82b613a5637537685b71174c412c028 100644 (file)
@@ -10,6 +10,11 @@ menuconfig RFKILL
          To compile this driver as a module, choose M here: the
          module will be called rfkill.
 
+config RFKILL_PM
+       bool "Power off on suspend"
+       depends on RFKILL && PM
+       default y
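+       help
+         Say Y to suspend rfkill polling and restore radio block
+         state across system suspend/resume.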
+
 # LED trigger support
 config RFKILL_LEDS
        bool
index 1cec5e4f3a5e62ff4503d937d78d435c1761cd52..c099b4fffd93d17e6167cd0530589008cea166d8 100644 (file)
@@ -792,6 +792,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
 
+#ifdef CONFIG_RFKILL_PM
 void rfkill_resume_polling(struct rfkill *rfkill)
 {
        BUG_ON(!rfkill);
@@ -826,14 +827,17 @@ static int rfkill_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_attrs      = rfkill_dev_attrs,
        .dev_uevent     = rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
        .suspend        = rfkill_suspend,
        .resume         = rfkill_resume,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
index 5356b120dbf8e2fe61ba88081d1fab1941f135a9..77d251e0259315eeef4acd4de50def4382bba5e1 100644 (file)
@@ -254,7 +254,7 @@ static int rpc_wait_bit_killable(void *word)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
-       freezable_schedule();
+       freezable_schedule_unsafe();
        return 0;
 }
 
index 75e198d029d2fe044161656b0d589510369a3b22..c80c107139f177cb304147f3a77c2dc24788ab9c 100644 (file)
 #include <linux/mount.h>
 #include <net/checksum.h>
 #include <linux/security.h>
+#include <linux/freezer.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -1896,7 +1897,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
-               timeo = schedule_timeout(timeo);
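+               /* Freezable sleep: lets the freezer handle tasks blocked here. */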
+               timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
index 16d08b39921071479456e2033c5e371d381175b7..4c602d100480cd624c1fd89567bb190e657fa6c1 100644 (file)
@@ -166,3 +166,14 @@ config LIB80211_DEBUG
          from lib80211.
 
          If unsure, say N.
+
+config CFG80211_ALLOW_RECONNECT
+       bool "Allow reconnect while already connected"
+       depends on CFG80211
+       default n
+       help
+         The cfg80211 stack normally refuses a connection attempt while
+         already connected.  This option allows initiating a new
+         connection in that state.
+
+         Select this option ONLY for wlan drivers that are specifically
+         built for such purposes.
index fd35dae547c446cbae459e0ea8c3f679320b952c..15b4bb7ac04651839712649a23089d79acb6cf7c 100644 (file)
@@ -77,9 +77,7 @@ struct cfg80211_registered_device {
 
        struct mutex sched_scan_mtx;
 
-#ifdef CONFIG_NL80211_TESTMODE
-       struct genl_info *testmode_info;
-#endif
+       struct genl_info *cur_cmd_info;
 
        struct work_struct conn_work;
        struct work_struct event_work;
index 448c034184e275619732d23c01a01f83aad44ba7..95d93678bf55458ad194f998245dbd7987dfa004 100644 (file)
@@ -378,6 +378,12 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_MDID] = { .type = NLA_U16 },
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+       [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
+       [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
+       [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+       [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
+       [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 };
 
 /* policy for the key attributes */
@@ -1520,6 +1526,39 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                                dev->wiphy.max_acl_mac_addrs))
                        goto nla_put_failure;
 
+               if (dev->wiphy.n_vendor_commands) {
+                       const struct nl80211_vendor_cmd_info *info;
+                       struct nlattr *nested;
+
+                       nested = nla_nest_start(msg, NL80211_ATTR_VENDOR_DATA);
+                       if (!nested)
+                               goto nla_put_failure;
+
+                       for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
+                               info = &dev->wiphy.vendor_commands[i].info;
+                               if (nla_put(msg, i + 1, sizeof(*info), info))
+                                       goto nla_put_failure;
+                       }
+                       nla_nest_end(msg, nested);
+               }
+
+               if (dev->wiphy.n_vendor_events) {
+                       const struct nl80211_vendor_cmd_info *info;
+                       struct nlattr *nested;
+
+                       nested = nla_nest_start(msg,
+                               NL80211_ATTR_VENDOR_EVENTS);
+                       if (!nested)
+                               goto nla_put_failure;
+
+                       for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
+                               info = &dev->wiphy.vendor_events[i];
+                               if (nla_put(msg, i + 1, sizeof(*info), info))
+                                       goto nla_put_failure;
+                       }
+                       nla_nest_end(msg, nested);
+               }
+
                /*
                 * Any information below this point is only available to
                 * applications that can deal with it being split. This
@@ -2635,8 +2674,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_NEW_KEY);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               goto nla_put_failure;
 
        cookie.msg = msg;
        cookie.idx = key_idx;
@@ -6393,12 +6432,62 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
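+/*
+ * Common allocator for testmode and vendor reply/event messages.  The
+ * rdev, the genl header and the open nest are stashed in skb->cb so the
+ * reply/event senders can finish the message without re-deriving them.
+ */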
+static struct sk_buff *
+__cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
+                           int approxlen, u32 portid, u32 seq,
+                           enum nl80211_commands cmd,
+                           enum nl80211_attrs attr,
+                           const struct nl80211_vendor_cmd_info *info,
+                           gfp_t gfp)
+{
+       struct sk_buff *skb;
+       void *hdr;
+       struct nlattr *data;
+
+       skb = nlmsg_new(approxlen + 100, gfp);
+       if (!skb)
+               return NULL;
+
+       hdr = nl80211hdr_put(skb, portid, seq, 0, cmd);
+       if (!hdr) {
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+               goto nla_put_failure;
+
+       if (info) {
+               if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID,
+                               info->vendor_id))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD,
+                               info->subcmd))
+                       goto nla_put_failure;
+       }
+
+       data = nla_nest_start(skb, attr);
+
+       ((void **)skb->cb)[0] = rdev;
+       ((void **)skb->cb)[1] = hdr;
+       ((void **)skb->cb)[2] = data;
+
+       return skb;
+
+ nla_put_failure:
+       kfree_skb(skb);
+       return NULL;
+}
 
+/*
+ * Registered unconditionally in nl80211_init(), so it must live outside
+ * the CONFIG_NL80211_TESTMODE block below.
+ */
+static struct genl_multicast_group nl80211_vendor_mcgrp = {
+       .name = "vendor",
+};
+
 #ifdef CONFIG_NL80211_TESTMODE
 static struct genl_multicast_group nl80211_testmode_mcgrp = {
        .name = "testmode",
 };
 
 static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6409,11 +6498,11 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 
        err = -EOPNOTSUPP;
        if (rdev->ops->testmode_cmd) {
-               rdev->testmode_info = info;
+               rdev->cur_cmd_info = info;
                err = rdev_testmode_cmd(rdev,
                                nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
                                nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
-               rdev->testmode_info = NULL;
+               rdev->cur_cmd_info = NULL;
        }
 
        return err;
@@ -6482,6 +6571,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
 
+               if (!hdr)
+                       break;
+
                if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
                        genlmsg_cancel(skb, hdr);
                        break;
@@ -6514,81 +6606,37 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        return err;
 }
 
-static struct sk_buff *
-__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
-                             int approxlen, u32 portid, u32 seq, gfp_t gfp)
-{
-       struct sk_buff *skb;
-       void *hdr;
-       struct nlattr *data;
-
-       skb = nlmsg_new(approxlen + 100, gfp);
-       if (!skb)
-               return NULL;
-
-       hdr = nl80211hdr_put(skb, portid, seq, 0, NL80211_CMD_TESTMODE);
-       if (!hdr) {
-               kfree_skb(skb);
-               return NULL;
-       }
-
-       if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
-               goto nla_put_failure;
-       data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
-
-       ((void **)skb->cb)[0] = rdev;
-       ((void **)skb->cb)[1] = hdr;
-       ((void **)skb->cb)[2] = data;
-
-       return skb;
-
- nla_put_failure:
-       kfree_skb(skb);
-       return NULL;
-}
-
-struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
-                                                 int approxlen)
+struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
+                                          enum nl80211_commands cmd,
+                                          enum nl80211_attrs attr,
+                                          int vendor_event_idx,
+                                          int approxlen, gfp_t gfp)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       const struct nl80211_vendor_cmd_info *info;
 
-       if (WARN_ON(!rdev->testmode_info))
+       switch (cmd) {
+       case NL80211_CMD_TESTMODE:
+               if (WARN_ON(vendor_event_idx != -1))
+                       return NULL;
+               info = NULL;
+               break;
+       case NL80211_CMD_VENDOR:
+               if (WARN_ON(vendor_event_idx < 0 ||
+                           vendor_event_idx >= wiphy->n_vendor_events))
+                       return NULL;
+               info = &wiphy->vendor_events[vendor_event_idx];
+               break;
+       default:
+               WARN_ON(1);
                return NULL;
-
-       return __cfg80211_testmode_alloc_skb(rdev, approxlen,
-                               rdev->testmode_info->snd_portid,
-                               rdev->testmode_info->snd_seq,
-                               GFP_KERNEL);
-}
-EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb);
-
-int cfg80211_testmode_reply(struct sk_buff *skb)
-{
-       struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
-       void *hdr = ((void **)skb->cb)[1];
-       struct nlattr *data = ((void **)skb->cb)[2];
-
-       if (WARN_ON(!rdev->testmode_info)) {
-               kfree_skb(skb);
-               return -EINVAL;
        }
-
-       nla_nest_end(skb, data);
-       genlmsg_end(skb, hdr);
-       return genlmsg_reply(skb, rdev->testmode_info);
-}
-EXPORT_SYMBOL(cfg80211_testmode_reply);
-
-struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
-                                                 int approxlen, gfp_t gfp)
-{
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-
-       return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp);
+       return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
+                                          cmd, attr, info, gfp);
 }
-EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
+EXPORT_SYMBOL(__cfg80211_alloc_event_skb);
 
-void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
 {
        struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
        void *hdr = ((void **)skb->cb)[1];
@@ -6596,10 +6644,15 @@ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
 
        nla_nest_end(skb, data);
        genlmsg_end(skb, hdr);
-       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
-                               nl80211_testmode_mcgrp.id, gfp);
+
+       if (data->nla_type == NL80211_ATTR_VENDOR_DATA)
+               genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+                       nl80211_vendor_mcgrp.id, gfp);
+       else
+               genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+                       nl80211_testmode_mcgrp.id, gfp);
 }
-EXPORT_SYMBOL(cfg80211_testmode_event);
+EXPORT_SYMBOL(__cfg80211_send_event_skb);
 #endif
 
 static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
@@ -6920,9 +6973,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_REMAIN_ON_CHANNEL);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
@@ -7209,9 +7261,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
                hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                                     NL80211_CMD_FRAME);
-
-               if (IS_ERR(hdr)) {
-                       err = PTR_ERR(hdr);
+               if (!hdr) {
+                       err = -ENOBUFS;
                        goto free_msg;
                }
        }
@@ -8072,9 +8123,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_PROBE_CLIENT);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
@@ -8291,6 +8341,108 @@ static int nl80211_crit_protocol_stop(struct sk_buff *skb,
        return 0;
 }
 
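+/*
+ * NL80211_CMD_VENDOR dispatch: find the wiphy vendor command matching
+ * the (vendor id, subcommand) pair, enforce its wdev/netdev/running
+ * requirements, then call its doit() with cur_cmd_info set so the
+ * handler can allocate and send a reply skb.
+ */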
+static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev =
+               __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs);
+       int i, err;
+       u32 vid, subcmd;
+
+       if (!rdev || !rdev->wiphy.vendor_commands)
+               return -EOPNOTSUPP;
+
+       if (IS_ERR(wdev)) {
+               err = PTR_ERR(wdev);
+               if (err != -EINVAL)
+                       return err;
+               wdev = NULL;
+       } else if (wdev->wiphy != &rdev->wiphy) {
+               return -EINVAL;
+       }
+
+       if (!info->attrs[NL80211_ATTR_VENDOR_ID] ||
+           !info->attrs[NL80211_ATTR_VENDOR_SUBCMD])
+               return -EINVAL;
+
+       vid = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_ID]);
+       subcmd = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_SUBCMD]);
+       for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
+               const struct wiphy_vendor_command *vcmd;
+               void *data = NULL;
+               int len = 0;
+
+               vcmd = &rdev->wiphy.vendor_commands[i];
+
+               if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
+                       continue;
+
+               if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
+                                  WIPHY_VENDOR_CMD_NEED_NETDEV)) {
+                       if (!wdev)
+                               return -EINVAL;
+                       if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
+                           !wdev->netdev)
+                               return -EINVAL;
+
+                       if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
+                               if (!wdev->netdev ||
+                                   !netif_running(wdev->netdev))
+                                       return -ENETDOWN;
+                       }
+               } else {
+                       wdev = NULL;
+               }
+
+               if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
+                       data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+                       len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+               }
+
+               rdev->cur_cmd_info = info;
+               err = rdev->wiphy.vendor_commands[i].doit(&rdev->wiphy, wdev,
+                                                               data, len);
+               rdev->cur_cmd_info = NULL;
+               return err;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
+                                          enum nl80211_commands cmd,
+                                          enum nl80211_attrs attr,
+                                          int approxlen)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+       if (WARN_ON(!rdev->cur_cmd_info))
+               return NULL;
+
+       return __cfg80211_alloc_vendor_skb(rdev, approxlen, 0, 0,
+                                          cmd, attr, NULL, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__cfg80211_alloc_reply_skb);
+
+int cfg80211_vendor_cmd_reply(struct sk_buff *skb)
+{
+       struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
+       void *hdr = ((void **)skb->cb)[1];
+       struct nlattr *data = ((void **)skb->cb)[2];
+
+       if (WARN_ON(!rdev->cur_cmd_info)) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       nla_nest_end(skb, data);
+       genlmsg_end(skb, hdr);
+       return genlmsg_reply(skb, rdev->cur_cmd_info);
+}
+EXPORT_SYMBOL(cfg80211_vendor_cmd_reply);
+
 #define NL80211_FLAG_NEED_WIPHY                0x01
 #define NL80211_FLAG_NEED_NETDEV       0x02
 #define NL80211_FLAG_NEED_RTNL         0x04
@@ -8995,7 +9147,14 @@ static struct genl_ops nl80211_ops[] = {
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
-       }
+       },
+       {
+               .cmd = NL80211_CMD_VENDOR,
+               .doit = nl80211_vendor_cmd,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -10816,6 +10975,10 @@ int nl80211_init(void)
                goto err_out;
 #endif
 
+       err = genl_register_mc_group(&nl80211_fam, &nl80211_vendor_mcgrp);
+       if (err)
+               goto err_out;
+
        err = netlink_register_notifier(&nl80211_netlink_notifier);
        if (err)
                goto err_out;
index 81019ee3ddc8c8a71955322d193238962edc05b4..4db2177a69ea1f684d537b54256b8bf17b8a1707 100644 (file)
@@ -55,7 +55,7 @@
  * also linked into the probe response struct.
  */
 
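+/* Reduced from 30 s so that stale scan results age out quickly. */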
-#define IEEE80211_SCAN_RESULT_EXPIRE   (30 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE   (3 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
 {
index 3ed35c345caed81966eb0ad52b7e1a4b350ec473..e2f74e66a1697e7fa4f10ac20a49b39e44848ebf 100644 (file)
@@ -707,8 +707,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                return;
 
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
        if (wdev->sme_state != CFG80211_SME_CONNECTED)
                return;
+#endif
 
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
@@ -785,10 +787,14 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
+#ifndef CONFIG_CFG80211_ALLOW_RECONNECT
        if (wdev->sme_state != CFG80211_SME_IDLE)
                return -EALREADY;
 
        if (WARN_ON(wdev->connect_keys)) {
+#else
+       if (wdev->connect_keys) {
+#endif
                kfree(wdev->connect_keys);
                wdev->connect_keys = NULL;
        }
index c4b37f6b5478c2ed457b254a1e4455955d6907aa..76fec094054576a14f2d80ac9812dc3db2be6b84 100644 (file)
@@ -274,6 +274,18 @@ $(obj)/%.dtb: $(src)/%.dts FORCE
 
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
+quiet_cmd_dtc_cpp = DTC+CPP $@
+cmd_dtc_cpp = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
+       $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) $(dtc-tmp)
+
+$(obj)/%.dtb: $(src)/%.dtsp FORCE
+       $(call if_changed_dep,dtc_cpp)
+
 # Bzip2
 # ---------------------------------------------------------------------------
 
index 43eda40c38383e007a3ab66c755a86a085eef9cc..35e0f164ef8128be1058cb7562c200263a22d595 100644 (file)
@@ -1083,7 +1083,7 @@ static void randomize_choice_values(struct symbol *csym)
        csym->flags &= ~(SYMBOL_VALID);
 }
 
-static void set_all_choice_values(struct symbol *csym)
+void set_all_choice_values(struct symbol *csym)
 {
        struct property *prop;
        struct symbol *sym;
@@ -1100,7 +1100,7 @@ static void set_all_choice_values(struct symbol *csym)
        }
        csym->flags |= SYMBOL_DEF_USER;
        /* clear VALID to get value calculated */
-       csym->flags &= ~(SYMBOL_VALID);
+       csym->flags &= ~(SYMBOL_VALID | SYMBOL_NEED_SET_CHOICE_VALUES);
 }
 
 void conf_set_all_new_symbols(enum conf_def_mode mode)
@@ -1202,6 +1202,14 @@ void conf_set_all_new_symbols(enum conf_def_mode mode)
         * selected in a choice block and we set it to yes,
         * and the rest to no.
         */
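+       /*
+        * For all modes except def_random, defer setting choice values:
+        * flag them here and let sym_calc_value() resolve them once
+        * their dependencies have been calculated.
+        */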
+       if (mode != def_random) {
+               for_all_symbols(i, csym) {
+                       if ((sym_is_choice(csym) && !sym_has_value(csym)) ||
+                           sym_is_choice_value(csym))
+                               csym->flags |= SYMBOL_NEED_SET_CHOICE_VALUES;
+               }
+       }
+
        for_all_symbols(i, csym) {
                if (sym_has_value(csym) || !sym_is_choice(csym))
                        continue;
@@ -1209,7 +1217,5 @@ void conf_set_all_new_symbols(enum conf_def_mode mode)
                sym_calc_value(csym);
                if (mode == def_random)
                        randomize_choice_values(csym);
-               else
-                       set_all_choice_values(csym);
        }
 }
index cdd48600e02a9bd842f2e43ab6b7f263949451ec..df198a5f482217781e7d4a605962bf443881ce7b 100644 (file)
@@ -106,6 +106,9 @@ struct symbol {
 #define SYMBOL_DEF3       0x40000  /* symbol.def[S_DEF_3] is valid */
 #define SYMBOL_DEF4       0x80000  /* symbol.def[S_DEF_4] is valid */
 
+/* choice values need to be set before calculating this symbol value */
+#define SYMBOL_NEED_SET_CHOICE_VALUES  0x100000
+
 #define SYMBOL_MAXLENGTH       256
 #define SYMBOL_HASHSIZE                9973
 
index f8aee5fc6d5e607625fc9e7f93bcbcf7f2e9f13b..0c8d4191ca87940b8653034c1ab56e6883eaab50 100644 (file)
@@ -87,6 +87,7 @@ char *conf_get_default_confname(void);
 void sym_set_change_count(int count);
 void sym_add_change_count(int count);
 void conf_set_all_new_symbols(enum conf_def_mode mode);
+void set_all_choice_values(struct symbol *csym);
 
 struct conf_printer {
        void (*print_symbol)(FILE *, struct symbol *, const char *, void *);
index ecc5aa5f865db7d253facefc10cc77fc2404e41d..ab8f4c83593308e0c245bf4136c7bff6f0bff763 100644 (file)
@@ -300,6 +300,14 @@ void sym_calc_value(struct symbol *sym)
 
        if (sym->flags & SYMBOL_VALID)
                return;
+
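+       /*
+        * A flagged choice value: evaluate the owning choice first so
+        * the deferred choice defaults are applied before this symbol.
+        */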
+       if (sym_is_choice_value(sym) &&
+           sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES) {
+               sym->flags &= ~SYMBOL_NEED_SET_CHOICE_VALUES;
+               prop = sym_get_choice_prop(sym);
+               sym_calc_value(prop_get_symbol(prop));
+       }
+
        sym->flags |= SYMBOL_VALID;
 
        oldval = sym->curr;
@@ -425,6 +433,9 @@ void sym_calc_value(struct symbol *sym)
 
        if (sym->flags & SYMBOL_AUTO)
                sym->flags &= ~SYMBOL_WRITE;
+
+       if (sym->flags & SYMBOL_NEED_SET_CHOICE_VALUES)
+               set_all_choice_values(sym);
 }
 
 void sym_clear_all_valid(void)
index 1728d4e375db509c4e192e0e69fb1a0ee020bf49..6e4fc776badf16cf568adcd88386d1fcfeacb8aa 100644 (file)
 
 #include <linux/security.h>
 
+static int cap_binder_set_context_mgr(struct task_struct *mgr)
+{
+       return 0;
+}
+
+static int cap_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+       return 0;
+}
+
+static int cap_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+       return 0;
+}
+
+static int cap_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+       return 0;
+}
+
 static int cap_syslog(int type)
 {
        return 0;
@@ -903,6 +923,10 @@ static void cap_audit_rule_free(void *lsmrule)
 
 void __init security_fixup_ops(struct security_operations *ops)
 {
+       set_to_cap_if_null(ops, binder_set_context_mgr);
+       set_to_cap_if_null(ops, binder_transaction);
+       set_to_cap_if_null(ops, binder_transfer_binder);
+       set_to_cap_if_null(ops, binder_transfer_file);
        set_to_cap_if_null(ops, ptrace_access_check);
        set_to_cap_if_null(ops, ptrace_traceme);
        set_to_cap_if_null(ops, capget);
index c44b6fe6648e6945518db1a1be1c66b43131b875..5870fdc224b436d954f240a546b5398bbc3ce028 100644 (file)
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -78,6 +82,13 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
 {
        struct user_namespace *ns = targ_ns;
 
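+       /*
+        * Android paranoid networking: membership in the AID_NET_RAW or
+        * AID_NET_ADMIN group grants the corresponding capability.
+        */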
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+       if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+               return 0;
+       if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+               return 0;
+#endif
+
        /* See if cred has the capability in the target user namespace
         * by examining the target user namespace and all of the target
         * user namespace's parents.
index a3dce87d1aeffccb92181631ef2687baec41d36c..d6ab2d2b363dace7394b060a2a64646762dc0d1f 100644 (file)
@@ -134,6 +134,26 @@ int __init register_security(struct security_operations *ops)
 
 /* Security operations */
 
+int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+       return security_ops->binder_set_context_mgr(mgr);
+}
+
+int security_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+       return security_ops->binder_transaction(from, to);
+}
+
+int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+       return security_ops->binder_transfer_binder(from, to);
+}
+
+int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+       return security_ops->binder_transfer_file(from, to, file);
+}
+
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {
 #ifdef CONFIG_SECURITY_YAMA_STACKED
index dad36a6ab45f628860ef4c5c097d2a5abce15b4a..c223a32c0bb326d8d8f31e8e3f30990c9ddd3c4f 100644 (file)
@@ -444,11 +444,15 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
        avc_dump_query(ab, ad->selinux_audit_data->ssid,
                           ad->selinux_audit_data->tsid,
                           ad->selinux_audit_data->tclass);
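+       /* On denials, record whether the denial was actually enforced. */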
+       if (ad->selinux_audit_data->denied) {
+               audit_log_format(ab, " permissive=%u",
+                                ad->selinux_audit_data->result ? 0 : 1);
+       }
 }
 
 /* This is the slow part of avc audit with big stack footprint */
 noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
-               u32 requested, u32 audited, u32 denied,
+               u32 requested, u32 audited, u32 denied, int result,
                struct common_audit_data *a,
                unsigned flags)
 {
@@ -477,6 +481,7 @@ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
        sad.tsid = tsid;
        sad.audited = audited;
        sad.denied = denied;
+       sad.result = result;
 
        a->selinux_audit_data = &sad;
 
index 70d4a8a7f21c1b996379fe59af3c7cca5aa087cd..e00585266536f5dbb35583d03f9b9ec272692ec5 100644 (file)
@@ -1856,6 +1856,67 @@ static inline u32 open_file_to_av(struct file *file)
 
 /* Hook functions begin here. */
 
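+/*
+ * Binder hooks: becoming context manager requires BINDER__SET_CONTEXT_MGR;
+ * transactions require BINDER__CALL (plus BINDER__IMPERSONATE when sent
+ * on another task's behalf); binder object transfers require
+ * BINDER__TRANSFER; and transferring a file re-validates FD__USE and the
+ * file's access vector for the receiving task.
+ */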
+static int selinux_binder_set_context_mgr(struct task_struct *mgr)
+{
+       u32 mysid = current_sid();
+       u32 mgrsid = task_sid(mgr);
+
+       return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER, BINDER__SET_CONTEXT_MGR, NULL);
+}
+
+static int selinux_binder_transaction(struct task_struct *from, struct task_struct *to)
+{
+       u32 mysid = current_sid();
+       u32 fromsid = task_sid(from);
+       u32 tosid = task_sid(to);
+       int rc;
+
+       if (mysid != fromsid) {
+               rc = avc_has_perm(mysid, fromsid, SECCLASS_BINDER, BINDER__IMPERSONATE, NULL);
+               if (rc)
+                       return rc;
+       }
+
+       return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__CALL, NULL);
+}
+
+static int selinux_binder_transfer_binder(struct task_struct *from, struct task_struct *to)
+{
+       u32 fromsid = task_sid(from);
+       u32 tosid = task_sid(to);
+       return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER, NULL);
+}
+
+static int selinux_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file)
+{
+       u32 sid = task_sid(to);
+       struct file_security_struct *fsec = file->f_security;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct inode_security_struct *isec = inode->i_security;
+       struct common_audit_data ad;
+       struct selinux_audit_data sad = {0,};
+       int rc;
+
+       ad.type = LSM_AUDIT_DATA_PATH;
+       ad.u.path = file->f_path;
+       ad.selinux_audit_data = &sad;
+
+       if (sid != fsec->sid) {
+               rc = avc_has_perm(sid, fsec->sid,
+                                 SECCLASS_FD,
+                                 FD__USE,
+                                 &ad);
+               if (rc)
+                       return rc;
+       }
+
+       if (unlikely(IS_PRIVATE(inode)))
+               return 0;
+
+       return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
+                           &ad);
+}
+
 static int selinux_ptrace_access_check(struct task_struct *child,
                                     unsigned int mode)
 {
@@ -2672,6 +2733,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na
 
 static noinline int audit_inode_permission(struct inode *inode,
                                           u32 perms, u32 audited, u32 denied,
+                                          int result,
                                           unsigned flags)
 {
        struct common_audit_data ad;
@@ -2682,7 +2744,7 @@ static noinline int audit_inode_permission(struct inode *inode,
        ad.u.inode = inode;
 
        rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
-                           audited, denied, &ad, flags);
+                           audited, denied, result, &ad, flags);
        if (rc)
                return rc;
        return 0;
@@ -2724,7 +2786,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
        if (likely(!audited))
                return rc;
 
-       rc2 = audit_inode_permission(inode, perms, audited, denied, flags);
+       rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
        if (rc2)
                return rc2;
        return rc;
@@ -5656,6 +5718,11 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
 static struct security_operations selinux_ops = {
        .name =                         "selinux",
 
+       .binder_set_context_mgr =       selinux_binder_set_context_mgr,
+       .binder_transaction =           selinux_binder_transaction,
+       .binder_transfer_binder =       selinux_binder_transfer_binder,
+       .binder_transfer_file =         selinux_binder_transfer_file,
+
        .ptrace_access_check =          selinux_ptrace_access_check,
        .ptrace_traceme =               selinux_ptrace_traceme,
        .capget =                       selinux_capget,
index 92d0ab561db80cb4aa253815149a35e02d6175d1..28a08a891704a3f12adad6f37a7e8a306ac4d65f 100644 (file)
@@ -102,7 +102,7 @@ static inline u32 avc_audit_required(u32 requested,
 }
 
 int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
-                  u32 requested, u32 audited, u32 denied,
+                  u32 requested, u32 audited, u32 denied, int result,
                   struct common_audit_data *a,
                   unsigned flags);
 
@@ -137,7 +137,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
        if (likely(!audited))
                return 0;
        return slow_avc_audit(ssid, tsid, tclass,
-                             requested, audited, denied,
+                             requested, audited, denied, result,
                              a, flags);
 }
 
index 14d04e63b1f0e09ef15b4cf64057bd7cc861ed1d..c32ff7bde81ab97ab85f868661856dca828e3d6e 100644 (file)
@@ -151,5 +151,6 @@ struct security_class_mapping secclass_map[] = {
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
          { COMMON_SOCK_PERMS, "attach_queue", NULL } },
+       { "binder", { "impersonate", "call", "set_context_mgr", "transfer", NULL } },
        { NULL }
   };