Merge remote-tracking branch 'lsk/v3.10/topic/aosp' into linux-linaro-lsk-android
authorMark Brown <broonie@linaro.org>
Fri, 13 Dec 2013 18:52:59 +0000 (18:52 +0000)
committerMark Brown <broonie@linaro.org>
Fri, 13 Dec 2013 18:52:59 +0000 (18:52 +0000)
Conflicts:
drivers/staging/android/ion/ion_chunk_heap.c
kernel/printk.c

74 files changed:
android/configs/README
android/configs/android-base.cfg
android/configs/android-recommended.cfg
arch/arm/boot/Makefile
arch/arm/common/fiq_glue.S
arch/arm/common/fiq_glue_setup.c
arch/arm/include/asm/fiq_glue.h
drivers/gpu/Makefile
drivers/gpu/ion/Kconfig [deleted file]
drivers/gpu/ion/Makefile [deleted file]
drivers/gpu/ion/compat_ion.c [deleted file]
drivers/gpu/ion/compat_ion.h [deleted file]
drivers/gpu/ion/ion.c [deleted file]
drivers/gpu/ion/ion_carveout_heap.c [deleted file]
drivers/gpu/ion/ion_chunk_heap.c [deleted file]
drivers/gpu/ion/ion_cma_heap.c [deleted file]
drivers/gpu/ion/ion_heap.c [deleted file]
drivers/gpu/ion/ion_page_pool.c [deleted file]
drivers/gpu/ion/ion_priv.h [deleted file]
drivers/gpu/ion/ion_system_heap.c [deleted file]
drivers/gpu/ion/ion_system_mapper.c [deleted file]
drivers/gpu/ion/tegra/Makefile [deleted file]
drivers/gpu/ion/tegra/tegra_ion.c [deleted file]
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/android_alarm.h
drivers/staging/android/ashmem.h
drivers/staging/android/binder.h
drivers/staging/android/ion/Kconfig [new file with mode: 0644]
drivers/staging/android/ion/Makefile [new file with mode: 0644]
drivers/staging/android/ion/compat_ion.c [new file with mode: 0644]
drivers/staging/android/ion/compat_ion.h [new file with mode: 0644]
drivers/staging/android/ion/ion.c [new file with mode: 0644]
drivers/staging/android/ion/ion.h [new file with mode: 0644]
drivers/staging/android/ion/ion_carveout_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_chunk_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_cma_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_page_pool.c [new file with mode: 0644]
drivers/staging/android/ion/ion_priv.h [new file with mode: 0644]
drivers/staging/android/ion/ion_system_heap.c [new file with mode: 0644]
drivers/staging/android/ion/ion_test.c [new file with mode: 0644]
drivers/staging/android/ion/tegra/Makefile [new file with mode: 0644]
drivers/staging/android/ion/tegra/tegra_ion.c [new file with mode: 0644]
drivers/staging/android/sw_sync.h
drivers/staging/android/sync.h
drivers/staging/android/uapi/android_alarm.h [new file with mode: 0644]
drivers/staging/android/uapi/ashmem.h [new file with mode: 0644]
drivers/staging/android/uapi/binder.h [new file with mode: 0644]
drivers/staging/android/uapi/ion.h [new file with mode: 0644]
drivers/staging/android/uapi/ion_test.h [new file with mode: 0644]
drivers/staging/android/uapi/sw_sync.h [new file with mode: 0644]
drivers/staging/android/uapi/sync.h [new file with mode: 0644]
drivers/usb/gadget/f_mtp.c
drivers/video/Kconfig
drivers/video/adf/adf.c
drivers/video/adf/adf_client.c
drivers/video/adf/adf_fbdev.c
drivers/video/adf/adf_sysfs.c
include/linux/if_pppolac.h
include/linux/if_pppopns.h
include/linux/ion.h [deleted file]
include/linux/keychord.h
include/linux/usb/f_accessory.h
include/linux/usb/f_mtp.h
include/uapi/linux/if_pppolac.h [new file with mode: 0644]
include/uapi/linux/if_pppopns.h [new file with mode: 0644]
include/uapi/linux/keychord.h [new file with mode: 0644]
include/uapi/linux/usb/f_accessory.h [new file with mode: 0644]
include/uapi/linux/usb/f_mtp.h [new file with mode: 0644]
include/uapi/video/adf.h
kernel/sys.c
mm/mempolicy.c
net/netfilter/xt_qtaguid.c

index 391dffa6f85f5643ae70ddc48e8c9f4c26465282..8798731f89048e90e5b0e0becb5311b43fa0f5e1 100644 (file)
@@ -11,3 +11,5 @@ way to enable these options would be:
 This will generate a .config that can then be used to save a new defconfig or
 compile a new kernel with Android features enabled.
 
+Because there is no tool to consistently generate these config fragments,
+lets keep them alphabetically sorted instead of random.
index 2b98436550cf2124e87d4e900c429ade73ef24ed..25162024f8896a3eb7dd0a8152b7ee35a0455ec9 100644 (file)
@@ -1,3 +1,4 @@
+#  KEEP ALPHABETICALLY SORTED
 # CONFIG_INET_LRO is not set
 # CONFIG_MODULES is not set
 # CONFIG_OABI_COMPAT is not set
index 546c37f552bc46c639764af0d9640150f5f7943f..b0120f678cc4ab9aea21fd893a37491e4ba747cd 100644 (file)
@@ -1,7 +1,4 @@
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_KALLSYMS_ALL=y
-CONFIG_PERF_EVENTS=y
-CONFIG_COMPACTION=y
+#  KEEP ALPHABETICALLY SORTED
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_LEGACY_PTYS is not set
@@ -14,6 +11,7 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_COMPACTION=y
 CONFIG_DM_UEVENT=y
 CONFIG_DRAGONRISE_FF=y
 CONFIG_EXT4_FS=y
@@ -81,6 +79,7 @@ CONFIG_ION=y
 CONFIG_JOYSTICK_XPAD=y
 CONFIG_JOYSTICK_XPAD_FF=y
 CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
 CONFIG_KSM=y
 CONFIG_LOGIG940_FF=y
 CONFIG_LOGIRUMBLEPAD2_FF=y
@@ -88,7 +87,9 @@ CONFIG_LOGITECH_FF=y
 CONFIG_MD=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
 CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_RUNTIME=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
index 84aa2caf07ed203fb810220258401a1b51f7cab3..085bb96493a373d7c2e2a17b20bf49f17b173f08 100644 (file)
@@ -14,6 +14,7 @@
 ifneq ($(MACHINE),)
 include $(srctree)/$(MACHINE)/Makefile.boot
 endif
+include $(srctree)/arch/arm/boot/dts/Makefile
 
 # Note: the following conditions must always be true:
 #   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
index 9e3455a09f8fa36c1cfd27ba42ef83dec8e71fe9..24b42cec4813fa45f9abe0974cb9a3880500d4f8 100644 (file)
                /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
 
 ENTRY(fiq_glue)
-               /* store pc, cpsr from previous mode */
+               /* store pc, cpsr from previous mode, reserve space for spsr */
                mrs     r12, spsr
-               sub     r11, lr, #4
+               sub     lr, lr, #4
                subs    r10, #1
                bne     nested_fiq
 
-               stmfd   sp!, {r11-r12, lr}
+               str     r12, [sp, #-8]!
+               str     lr, [sp, #-4]!
 
                /* store r8-r14 from previous mode */
                sub     sp, sp, #(7 * 4)
@@ -85,12 +86,15 @@ fiq_from_usr_mode_exit:
                msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
 
                ldmfd   sp!, {r0-r7}
-               add     sp, sp, #(7 * 4)
-               ldmfd   sp!, {r11-r12, lr}
+               ldr     lr, [sp, #(4 * 7)]
+               ldr     r12, [sp, #(4 * 8)]
+               add     sp, sp, #(10 * 4)
 exit_fiq:
                msr     spsr_cxsf, r12
                add     r10, #1
-               movs    pc, r11
+               cmp     r11, #0
+               moveqs  pc, lr
+               bx      r11 /* jump to custom fiq return function */
 
 nested_fiq:
                orr     r12, r12, #(PSR_F_BIT)
@@ -98,14 +102,17 @@ nested_fiq:
 
 fiq_glue_end:
 
-ENTRY(fiq_glue_setup) /* func, data, sp */
-               mrs             r3, cpsr
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+               stmfd           sp!, {r4}
+               mrs             r4, cpsr
                msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
                movs            r8, r0
                mov             r9, r1
                mov             sp, r2
+               mov             r11, r3
                moveq           r10, #0
                movne           r10, #1
-               msr             cpsr_c, r3
+               msr             cpsr_c, r4
+               ldmfd           sp!, {r4}
                bx              lr
 
index 4044c7db95c8c94c131aab30a44d5c9d44d9f1db..8cb1b611c6d57d0ae47e985432f04a3b1834d49d 100644 (file)
 #include <asm/fiq_glue.h>
 
 extern unsigned char fiq_glue, fiq_glue_end;
-extern void fiq_glue_setup(void *func, void *data, void *sp);
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+                          fiq_return_handler_t fiq_return_handler);
 
 static struct fiq_handler fiq_debbuger_fiq_handler = {
        .name = "fiq_glue",
 };
 DEFINE_PER_CPU(void *, fiq_stack);
 static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
 static DEFINE_MUTEX(fiq_glue_lock);
 
 static void fiq_glue_setup_helper(void *info)
 {
        struct fiq_glue_handler *handler = info;
        fiq_glue_setup(handler->fiq, handler,
-               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
 }
 
 int fiq_glue_register_handler(struct fiq_glue_handler *handler)
@@ -80,6 +83,49 @@ err_busy:
        return ret;
 }
 
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+       fiq_return_handler = fiq_return;
+       if (current_handler)
+               on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (fiq_return_handler) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+       fiq_glue_update_return_handler(fiq_return);
+       ret = 0;
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+       int ret;
+
+       mutex_lock(&fiq_glue_lock);
+       if (WARN_ON(fiq_return_handler != fiq_return)) {
+               ret = -EINVAL;
+               goto err_inval;
+       }
+       fiq_glue_update_return_handler(NULL);
+       ret = 0;
+err_inval:
+       mutex_unlock(&fiq_glue_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
 /**
  * fiq_glue_resume - Restore fiqs after suspend or low power idle states
  *
@@ -93,7 +139,8 @@ void fiq_glue_resume(void)
        if (!current_handler)
                return;
        fiq_glue_setup(current_handler->fiq, current_handler,
-               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+               __get_cpu_var(fiq_stack) + THREAD_START_SP,
+               fiq_return_handler);
        if (current_handler->resume)
                current_handler->resume(current_handler);
 }
index d54c29db97a8345f87015c033f67101fc4fe8c94..a9e244f9f197c8cfd80d706a2a02c48cfb3f8f98 100644 (file)
@@ -18,8 +18,11 @@ struct fiq_glue_handler {
        void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
        void (*resume)(struct fiq_glue_handler *h);
 };
+typedef void (*fiq_return_handler_t)(void);
 
 int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
 
 #ifdef CONFIG_FIQ_GLUE
 void fiq_glue_resume(void);
index a2efb62aa7760b28ae22d5ea8c9f1edc6a381148..d8a22c2a579d458c17f4fd5d7b57f5bdee13cf65 100644 (file)
@@ -1,2 +1,2 @@
-obj-y                  += drm/ vga/ ion/
+obj-y                  += drm/ vga/
 obj-$(CONFIG_TEGRA_HOST1X)     += host1x/
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
deleted file mode 100644 (file)
index c62f2cb..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-menuconfig ION
-       tristate "Ion Memory Manager"
-       depends on ARM
-       select GENERIC_ALLOCATOR
-       select DMA_SHARED_BUFFER
-       help
-         Chose this option to enable the ION Memory Manager.
-
-config ION_TEGRA
-       tristate "Ion for Tegra"
-       depends on ARCH_TEGRA && ION
-       help
-         Choose this option if you wish to use ion on an nVidia Tegra.
-
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
deleted file mode 100644 (file)
index 9c95665..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
-                       ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_ion.o
-endif
-obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/compat_ion.c b/drivers/gpu/ion/compat_ion.c
deleted file mode 100644 (file)
index e0d2839..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * drivers/gpu/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/ion.h>
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "compat_ion.h"
-
-/* See include/linux/ion.h for the definition of these structs */
-struct compat_ion_allocation_data {
-       compat_size_t len;
-       compat_size_t align;
-       compat_uint_t heap_id_mask;
-       compat_uint_t flags;
-       compat_int_t handle;
-};
-
-struct compat_ion_custom_data {
-       compat_uint_t cmd;
-       compat_ulong_t arg;
-};
-
-static int compat_get_ion_allocation_data(
-                       struct compat_ion_allocation_data __user *data32,
-                       struct ion_allocation_data __user *data)
-{
-       compat_size_t s;
-       compat_uint_t u;
-       compat_int_t i;
-       int err;
-
-       err = get_user(s, &data32->len);
-       err |= put_user(s, &data->len);
-       err |= get_user(s, &data32->align);
-       err |= put_user(s, &data->align);
-       err |= get_user(u, &data32->heap_id_mask);
-       err |= put_user(u, &data->heap_id_mask);
-       err |= get_user(u, &data32->flags);
-       err |= put_user(u, &data->flags);
-       err |= get_user(i, &data32->handle);
-       err |= put_user(i, &data->handle);
-
-       return err;
-}
-
-static int compat_put_ion_allocation_data(
-                       struct compat_ion_allocation_data __user *data32,
-                       struct ion_allocation_data __user *data)
-{
-       compat_size_t s;
-       compat_uint_t u;
-       compat_int_t i;
-       int err;
-
-       err = get_user(s, &data->len);
-       err |= put_user(s, &data32->len);
-       err |= get_user(s, &data->align);
-       err |= put_user(s, &data32->align);
-       err |= get_user(u, &data->heap_id_mask);
-       err |= put_user(u, &data32->heap_id_mask);
-       err |= get_user(u, &data->flags);
-       err |= put_user(u, &data32->flags);
-       err |= get_user(i, &data->handle);
-       err |= put_user(i, &data32->handle);
-
-       return err;
-}
-
-static int compat_get_ion_custom_data(
-                       struct compat_ion_custom_data __user *data32,
-                       struct ion_custom_data __user *data)
-{
-        compat_uint_t cmd;
-        compat_ulong_t arg;
-       int err;
-
-       err = get_user(cmd, &data32->cmd);
-       err |= put_user(cmd, &data->cmd);
-       err |= get_user(arg, &data32->arg);
-       err |= put_user(arg, &data->arg);
-
-       return err;
-};
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       long ret;
-
-       if (!filp->f_op || !filp->f_op->unlocked_ioctl)
-               return -ENOTTY;
-
-       switch (cmd) {
-       case ION_IOC_ALLOC:
-       {
-               struct compat_ion_allocation_data __user *data32;
-               struct ion_allocation_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_allocation_data(data32, data);
-               if (err)
-                       return err;
-
-               ret = filp->f_op->unlocked_ioctl(filp, cmd,
-                                                       (unsigned long)data);
-               err = compat_put_ion_allocation_data(data32, data);
-               return ret ? ret : err;
-       }
-       case ION_IOC_FREE:
-       {
-               struct compat_ion_allocation_data __user *data32;
-               struct ion_allocation_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_allocation_data(data32, data);
-               if (err)
-                       return err;
-
-               return filp->f_op->unlocked_ioctl(filp, cmd,
-                                                       (unsigned long)data);
-       }
-       case ION_IOC_CUSTOM: {
-               struct compat_ion_custom_data __user *data32;
-               struct ion_custom_data __user *data;
-               int err;
-
-               data32 = compat_ptr(arg);
-               data = compat_alloc_user_space(sizeof(*data));
-               if (data == NULL)
-                       return -EFAULT;
-
-               err = compat_get_ion_custom_data(data32, data);
-               if (err)
-                       return err;
-
-               return filp->f_op->unlocked_ioctl(filp, cmd,
-                                                       (unsigned long)data);
-       }
-       case ION_IOC_SHARE:
-       case ION_IOC_MAP:
-       case ION_IOC_IMPORT:
-       case ION_IOC_SYNC:
-               return filp->f_op->unlocked_ioctl(filp, cmd,
-                                               (unsigned long)compat_ptr(arg));
-       default:
-               return -ENOIOCTLCMD;
-       }
-}
diff --git a/drivers/gpu/ion/compat_ion.h b/drivers/gpu/ion/compat_ion.h
deleted file mode 100644 (file)
index 3a9c8c0..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-
- * drivers/gpu/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_ION_H
-#define _LINUX_COMPAT_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_ion_ioctl  NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
deleted file mode 100644 (file)
index e4ffc9d..0000000
+++ /dev/null
@@ -1,1497 +0,0 @@
-/*
-
- * drivers/gpu/ion/ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/anon_inodes.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/memblock.h>
-#include <linux/miscdevice.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/dma-buf.h>
-#include <linux/idr.h>
-
-#include "ion_priv.h"
-#include "compat_ion.h"
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev:               the actual misc device
- * @buffers:           an rb tree of all the existing buffers
- * @buffer_lock:       lock protecting the tree of buffers
- * @lock:              rwsem protecting the tree of heaps and clients
- * @heaps:             list of all the heaps in the system
- * @user_clients:      list of all the clients created from userspace
- */
-struct ion_device {
-       struct miscdevice dev;
-       struct rb_root buffers;
-       struct mutex buffer_lock;
-       struct rw_semaphore lock;
-       struct plist_head heaps;
-       long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
-                             unsigned long arg);
-       struct rb_root clients;
-       struct dentry *debug_root;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node:              node in the tree of all clients
- * @dev:               backpointer to ion device
- * @handles:           an rb tree of all the handles in this client
- * @idr:               an idr space for allocating handle ids
- * @lock:              lock protecting the tree of handles
- * @name:              used for debugging
- * @task:              used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
-       struct rb_node node;
-       struct ion_device *dev;
-       struct rb_root handles;
-       struct idr idr;
-       struct mutex lock;
-       const char *name;
-       struct task_struct *task;
-       pid_t pid;
-       struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref:               reference count
- * @client:            back pointer to the client the buffer resides in
- * @buffer:            pointer to the buffer
- * @node:              node in the client's handle rbtree
- * @kmap_cnt:          count of times this client has mapped to kernel
- * @id:                        client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client.  Other fields are never changed after initialization.
- */
-struct ion_handle {
-       struct kref ref;
-       struct ion_client *client;
-       struct ion_buffer *buffer;
-       struct rb_node node;
-       unsigned int kmap_cnt;
-       int id;
-};
-
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
-       return ((buffer->flags & ION_FLAG_CACHED) &&
-               !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
-}
-
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
-       return !!(buffer->flags & ION_FLAG_CACHED);
-}
-
-static inline struct page *ion_buffer_page(struct page *page)
-{
-       return (struct page *)((unsigned long)page & ~(1UL));
-}
-
-static inline bool ion_buffer_page_is_dirty(struct page *page)
-{
-       return !!((unsigned long)page & 1UL);
-}
-
-static inline void ion_buffer_page_dirty(struct page **page)
-{
-       *page = (struct page *)((unsigned long)(*page) | 1UL);
-}
-
-static inline void ion_buffer_page_clean(struct page **page)
-{
-       *page = (struct page *)((unsigned long)(*page) & ~(1UL));
-}
-
-/* this function should only be called while dev->lock is held */
-static void ion_buffer_add(struct ion_device *dev,
-                          struct ion_buffer *buffer)
-{
-       struct rb_node **p = &dev->buffers.rb_node;
-       struct rb_node *parent = NULL;
-       struct ion_buffer *entry;
-
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_buffer, node);
-
-               if (buffer < entry) {
-                       p = &(*p)->rb_left;
-               } else if (buffer > entry) {
-                       p = &(*p)->rb_right;
-               } else {
-                       pr_err("%s: buffer already found.", __func__);
-                       BUG();
-               }
-       }
-
-       rb_link_node(&buffer->node, parent, p);
-       rb_insert_color(&buffer->node, &dev->buffers);
-}
-
-/* this function should only be called while dev->lock is held */
-static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
-                                    struct ion_device *dev,
-                                    unsigned long len,
-                                    unsigned long align,
-                                    unsigned long flags)
-{
-       struct ion_buffer *buffer;
-       struct sg_table *table;
-       struct scatterlist *sg;
-       int i, ret;
-
-       buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
-       if (!buffer)
-               return ERR_PTR(-ENOMEM);
-
-       buffer->heap = heap;
-       buffer->flags = flags;
-       kref_init(&buffer->ref);
-
-       ret = heap->ops->allocate(heap, buffer, len, align, flags);
-
-       if (ret) {
-               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
-                       goto err2;
-
-               ion_heap_freelist_drain(heap, 0);
-               ret = heap->ops->allocate(heap, buffer, len, align,
-                                         flags);
-               if (ret)
-                       goto err2;
-       }
-
-       buffer->dev = dev;
-       buffer->size = len;
-
-       table = heap->ops->map_dma(heap, buffer);
-       if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
-               table = ERR_PTR(-EINVAL);
-       if (IS_ERR(table)) {
-               heap->ops->free(buffer);
-               kfree(buffer);
-               return ERR_PTR(PTR_ERR(table));
-       }
-       buffer->sg_table = table;
-       if (ion_buffer_fault_user_mappings(buffer)) {
-               int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-               struct scatterlist *sg;
-               int i, j, k = 0;
-
-               buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
-               if (!buffer->pages) {
-                       ret = -ENOMEM;
-                       goto err1;
-               }
-
-               for_each_sg(table->sgl, sg, table->nents, i) {
-                       struct page *page = sg_page(sg);
-
-                       for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
-                               buffer->pages[k++] = page++;
-               }
-
-               if (ret)
-                       goto err;
-       }
-
-       buffer->dev = dev;
-       buffer->size = len;
-       INIT_LIST_HEAD(&buffer->vmas);
-       mutex_init(&buffer->lock);
-       /* this will set up dma addresses for the sglist -- it is not
-          technically correct as per the dma api -- a specific
-          device isn't really taking ownership here.  However, in practice on
-          our systems the only dma_address space is physical addresses.
-          Additionally, we can't afford the overhead of invalidating every
-          allocation via dma_map_sg. The implicit contract here is that
-          memory comming from the heaps is ready for dma, ie if it has a
-          cached mapping that mapping has been invalidated */
-       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
-               sg_dma_address(sg) = sg_phys(sg);
-       mutex_lock(&dev->buffer_lock);
-       ion_buffer_add(dev, buffer);
-       mutex_unlock(&dev->buffer_lock);
-       return buffer;
-
-err:
-       heap->ops->unmap_dma(heap, buffer);
-       heap->ops->free(buffer);
-err1:
-       if (buffer->pages)
-               vfree(buffer->pages);
-err2:
-       kfree(buffer);
-       return ERR_PTR(ret);
-}
-
-void ion_buffer_destroy(struct ion_buffer *buffer)
-{
-       if (WARN_ON(buffer->kmap_cnt > 0))
-               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-       buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-       buffer->heap->ops->free(buffer);
-       if (buffer->pages)
-               vfree(buffer->pages);
-       kfree(buffer);
-}
-
-static void _ion_buffer_destroy(struct kref *kref)
-{
-       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
-       struct ion_heap *heap = buffer->heap;
-       struct ion_device *dev = buffer->dev;
-
-       mutex_lock(&dev->buffer_lock);
-       rb_erase(&buffer->node, &dev->buffers);
-       mutex_unlock(&dev->buffer_lock);
-
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               ion_heap_freelist_add(heap, buffer);
-       else
-               ion_buffer_destroy(buffer);
-}
-
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
-       kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
-       return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
-       mutex_lock(&buffer->lock);
-       buffer->handle_count++;
-       mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
-       /*
-        * when a buffer is removed from a handle, if it is not in
-        * any other handles, copy the taskcomm and the pid of the
-        * process it's being removed from into the buffer.  At this
-        * point there will be no way to track what processes this buffer is
-        * being used by, it only exists as a dma_buf file descriptor.
-        * The taskcomm and pid can provide a debug hint as to where this fd
-        * is in the system
-        */
-       mutex_lock(&buffer->lock);
-       buffer->handle_count--;
-       BUG_ON(buffer->handle_count < 0);
-       if (!buffer->handle_count) {
-               struct task_struct *task;
-
-               task = current->group_leader;
-               get_task_comm(buffer->task_comm, task);
-               buffer->pid = task_pid_nr(task);
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
-                                    struct ion_buffer *buffer)
-{
-       struct ion_handle *handle;
-
-       handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
-       if (!handle)
-               return ERR_PTR(-ENOMEM);
-       kref_init(&handle->ref);
-       RB_CLEAR_NODE(&handle->node);
-       handle->client = client;
-       ion_buffer_get(buffer);
-       ion_buffer_add_to_handle(buffer);
-       handle->buffer = buffer;
-
-       return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
-       struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
-       struct ion_client *client = handle->client;
-       struct ion_buffer *buffer = handle->buffer;
-
-       mutex_lock(&buffer->lock);
-       while (handle->kmap_cnt)
-               ion_handle_kmap_put(handle);
-       mutex_unlock(&buffer->lock);
-
-       idr_remove(&client->idr, handle->id);
-       if (!RB_EMPTY_NODE(&handle->node))
-               rb_erase(&handle->node, &client->handles);
-
-       ion_buffer_remove_from_handle(buffer);
-       ion_buffer_put(buffer);
-
-       kfree(handle);
-}
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
-{
-       return handle->buffer;
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
-       kref_get(&handle->ref);
-}
-
-static int ion_handle_put(struct ion_handle *handle)
-{
-       return kref_put(&handle->ref, ion_handle_destroy);
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
-                                           struct ion_buffer *buffer)
-{
-       struct rb_node *n = client->handles.rb_node;
-
-       while (n) {
-               struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
-               if (buffer < entry->buffer)
-                       n = n->rb_left;
-               else if (buffer > entry->buffer)
-                       n = n->rb_right;
-               else
-                       return entry;
-       }
-       return ERR_PTR(-EINVAL);
-}
-
-static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
-{
-       return idr_find(&client->idr, id);
-}
-
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
-{
-       return (ion_uhandle_get(client, handle->id) == handle);
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
-       int rc;
-       struct rb_node **p = &client->handles.rb_node;
-       struct rb_node *parent = NULL;
-       struct ion_handle *entry;
-
-       do {
-               int id;
-               rc = idr_pre_get(&client->idr, GFP_KERNEL);
-               if (!rc)
-                       return -ENOMEM;
-               rc = idr_get_new_above(&client->idr, handle, 1, &id);
-               handle->id = id;
-       } while (rc == -EAGAIN);
-
-       if (rc < 0)
-               return rc;
-
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_handle, node);
-
-               if (handle->buffer < entry->buffer)
-                       p = &(*p)->rb_left;
-               else if (handle->buffer > entry->buffer)
-                       p = &(*p)->rb_right;
-               else
-                       WARN(1, "%s: buffer already found.", __func__);
-       }
-
-       rb_link_node(&handle->node, parent, p);
-       rb_insert_color(&handle->node, &client->handles);
-
-       return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-                            size_t align, unsigned int heap_id_mask,
-                            unsigned int flags)
-{
-       struct ion_handle *handle;
-       struct ion_device *dev = client->dev;
-       struct ion_buffer *buffer = NULL;
-       struct ion_heap *heap;
-       int ret;
-
-       pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
-                len, align, heap_id_mask, flags);
-       /*
-        * traverse the list of heaps available in this system in priority
-        * order.  If the heap type is supported by the client, and matches the
-        * request of the caller allocate from it.  Repeat until allocate has
-        * succeeded or all heaps have been tried
-        */
-       if (WARN_ON(!len))
-               return ERR_PTR(-EINVAL);
-
-       len = PAGE_ALIGN(len);
-
-       down_read(&dev->lock);
-       plist_for_each_entry(heap, &dev->heaps, node) {
-               /* if the caller didn't specify this heap id */
-               if (!((1 << heap->id) & heap_id_mask))
-                       continue;
-               buffer = ion_buffer_create(heap, dev, len, align, flags);
-               if (!IS_ERR(buffer))
-                       break;
-       }
-       up_read(&dev->lock);
-
-       if (buffer == NULL)
-               return ERR_PTR(-ENODEV);
-
-       if (IS_ERR(buffer))
-               return ERR_PTR(PTR_ERR(buffer));
-
-       handle = ion_handle_create(client, buffer);
-
-       /*
-        * ion_buffer_create will create a buffer with a ref_cnt of 1,
-        * and ion_handle_create will take a second reference, drop one here
-        */
-       ion_buffer_put(buffer);
-
-       if (IS_ERR(handle))
-               return handle;
-
-       mutex_lock(&client->lock);
-       ret = ion_handle_add(client, handle);
-       if (ret) {
-               ion_handle_put(handle);
-               handle = ERR_PTR(ret);
-       }
-       mutex_unlock(&client->lock);
-
-       return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
-       bool valid_handle;
-
-       BUG_ON(client != handle->client);
-
-       mutex_lock(&client->lock);
-       valid_handle = ion_handle_validate(client, handle);
-
-       if (!valid_handle) {
-               WARN(1, "%s: invalid handle passed to free.\n", __func__);
-               mutex_unlock(&client->lock);
-               return;
-       }
-       ion_handle_put(handle);
-       mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_free);
-
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-            ion_phys_addr_t *addr, size_t *len)
-{
-       struct ion_buffer *buffer;
-       int ret;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               mutex_unlock(&client->lock);
-               return -EINVAL;
-       }
-
-       buffer = handle->buffer;
-
-       if (!buffer->heap->ops->phys) {
-               pr_err("%s: ion_phys is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return -ENODEV;
-       }
-       mutex_unlock(&client->lock);
-       ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
-       return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
-       void *vaddr;
-
-       if (buffer->kmap_cnt) {
-               buffer->kmap_cnt++;
-               return buffer->vaddr;
-       }
-       vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-       if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
-               return ERR_PTR(-EINVAL);
-       if (IS_ERR(vaddr))
-               return vaddr;
-       buffer->vaddr = vaddr;
-       buffer->kmap_cnt++;
-       return vaddr;
-}
-
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
-       struct ion_buffer *buffer = handle->buffer;
-       void *vaddr;
-
-       if (handle->kmap_cnt) {
-               handle->kmap_cnt++;
-               return buffer->vaddr;
-       }
-       vaddr = ion_buffer_kmap_get(buffer);
-       if (IS_ERR(vaddr))
-               return vaddr;
-       handle->kmap_cnt++;
-       return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
-       buffer->kmap_cnt--;
-       if (!buffer->kmap_cnt) {
-               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-               buffer->vaddr = NULL;
-       }
-}
-
-static void ion_handle_kmap_put(struct ion_handle *handle)
-{
-       struct ion_buffer *buffer = handle->buffer;
-
-       handle->kmap_cnt--;
-       if (!handle->kmap_cnt)
-               ion_buffer_kmap_put(buffer);
-}
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       void *vaddr;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_kernel.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-EINVAL);
-       }
-
-       buffer = handle->buffer;
-
-       if (!handle->buffer->heap->ops->map_kernel) {
-               pr_err("%s: map_kernel is not implemented by this heap.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-ENODEV);
-       }
-
-       mutex_lock(&buffer->lock);
-       vaddr = ion_handle_kmap_get(handle);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-       return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-
-       mutex_lock(&client->lock);
-       buffer = handle->buffer;
-       mutex_lock(&buffer->lock);
-       ion_handle_kmap_put(handle);
-       mutex_unlock(&buffer->lock);
-       mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
-       struct ion_client *client = s->private;
-       struct rb_node *n;
-       size_t sizes[ION_NUM_HEAP_IDS] = {0};
-       const char *names[ION_NUM_HEAP_IDS] = {0};
-       int i;
-
-       mutex_lock(&client->lock);
-       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
-               struct ion_handle *handle = rb_entry(n, struct ion_handle,
-                                                    node);
-               unsigned int id = handle->buffer->heap->id;
-
-               if (!names[id])
-                       names[id] = handle->buffer->heap->name;
-               sizes[id] += handle->buffer->size;
-       }
-       mutex_unlock(&client->lock);
-
-       seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
-       for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
-               if (!names[i])
-                       continue;
-               seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
-       }
-       return 0;
-}
-
-static int ion_debug_client_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ion_debug_client_show, inode->i_private);
-}
-
-static const struct file_operations debug_client_fops = {
-       .open = ion_debug_client_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-struct ion_client *ion_client_create(struct ion_device *dev,
-                                    const char *name)
-{
-       struct ion_client *client;
-       struct task_struct *task;
-       struct rb_node **p;
-       struct rb_node *parent = NULL;
-       struct ion_client *entry;
-       char debug_name[64];
-       pid_t pid;
-
-       get_task_struct(current->group_leader);
-       task_lock(current->group_leader);
-       pid = task_pid_nr(current->group_leader);
-       /* don't bother to store task struct for kernel threads,
-          they can't be killed anyway */
-       if (current->group_leader->flags & PF_KTHREAD) {
-               put_task_struct(current->group_leader);
-               task = NULL;
-       } else {
-               task = current->group_leader;
-       }
-       task_unlock(current->group_leader);
-
-       client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
-       if (!client) {
-               if (task)
-                       put_task_struct(current->group_leader);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       client->dev = dev;
-       client->handles = RB_ROOT;
-       idr_init(&client->idr);
-       mutex_init(&client->lock);
-       client->name = name;
-       client->task = task;
-       client->pid = pid;
-
-       down_write(&dev->lock);
-       p = &dev->clients.rb_node;
-       while (*p) {
-               parent = *p;
-               entry = rb_entry(parent, struct ion_client, node);
-
-               if (client < entry)
-                       p = &(*p)->rb_left;
-               else if (client > entry)
-                       p = &(*p)->rb_right;
-       }
-       rb_link_node(&client->node, parent, p);
-       rb_insert_color(&client->node, &dev->clients);
-
-       snprintf(debug_name, 64, "%u", client->pid);
-       client->debug_root = debugfs_create_file(debug_name, 0664,
-                                                dev->debug_root, client,
-                                                &debug_client_fops);
-       up_write(&dev->lock);
-
-       return client;
-}
-EXPORT_SYMBOL(ion_client_create);
-
-void ion_client_destroy(struct ion_client *client)
-{
-       struct ion_device *dev = client->dev;
-       struct rb_node *n;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       while ((n = rb_first(&client->handles))) {
-               struct ion_handle *handle = rb_entry(n, struct ion_handle,
-                                                    node);
-               ion_handle_destroy(&handle->ref);
-       }
-
-       idr_remove_all(&client->idr);
-       idr_destroy(&client->idr);
-
-       down_write(&dev->lock);
-       if (client->task)
-               put_task_struct(client->task);
-       rb_erase(&client->node, &dev->clients);
-       debugfs_remove_recursive(client->debug_root);
-       up_write(&dev->lock);
-
-       kfree(client);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
-struct sg_table *ion_sg_table(struct ion_client *client,
-                             struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       struct sg_table *table;
-
-       mutex_lock(&client->lock);
-       if (!ion_handle_validate(client, handle)) {
-               pr_err("%s: invalid handle passed to map_dma.\n",
-                      __func__);
-               mutex_unlock(&client->lock);
-               return ERR_PTR(-EINVAL);
-       }
-       buffer = handle->buffer;
-       table = buffer->sg_table;
-       mutex_unlock(&client->lock);
-       return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-                                      struct device *dev,
-                                      enum dma_data_direction direction);
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
-                                       enum dma_data_direction direction)
-{
-       struct dma_buf *dmabuf = attachment->dmabuf;
-       struct ion_buffer *buffer = dmabuf->priv;
-
-       ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-       return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
-                             struct sg_table *table,
-                             enum dma_data_direction direction)
-{
-}
-
-struct ion_vma_list {
-       struct list_head list;
-       struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
-                                      struct device *dev,
-                                      enum dma_data_direction dir)
-{
-       struct ion_vma_list *vma_list;
-       int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-       int i;
-
-       pr_debug("%s: syncing for device %s\n", __func__,
-                dev ? dev_name(dev) : "null");
-
-       if (!ion_buffer_fault_user_mappings(buffer))
-               return;
-
-       mutex_lock(&buffer->lock);
-       for (i = 0; i < pages; i++) {
-               struct page *page = buffer->pages[i];
-
-               if (ion_buffer_page_is_dirty(page))
-                       __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
-               ion_buffer_page_clean(buffer->pages + i);
-       }
-       list_for_each_entry(vma_list, &buffer->vmas, list) {
-               struct vm_area_struct *vma = vma_list->vma;
-
-               zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
-                              NULL);
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       int ret;
-
-       mutex_lock(&buffer->lock);
-       ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
-
-       BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
-       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-                            ion_buffer_page(buffer->pages[vmf->pgoff]));
-       mutex_unlock(&buffer->lock);
-       if (ret)
-               return VM_FAULT_ERROR;
-
-       return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       struct ion_vma_list *vma_list;
-
-       vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
-       if (!vma_list)
-               return;
-       vma_list->vma = vma;
-       mutex_lock(&buffer->lock);
-       list_add(&vma_list->list, &buffer->vmas);
-       mutex_unlock(&buffer->lock);
-       pr_debug("%s: adding %p\n", __func__, vma);
-}
-
-static void ion_vm_close(struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = vma->vm_private_data;
-       struct ion_vma_list *vma_list, *tmp;
-
-       pr_debug("%s\n", __func__);
-       mutex_lock(&buffer->lock);
-       list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
-               if (vma_list->vma != vma)
-                       continue;
-               list_del(&vma_list->list);
-               kfree(vma_list);
-               pr_debug("%s: deleting %p\n", __func__, vma);
-               break;
-       }
-       mutex_unlock(&buffer->lock);
-}
-
-struct vm_operations_struct ion_vma_ops = {
-       .open = ion_vm_open,
-       .close = ion_vm_close,
-       .fault = ion_vm_fault,
-};
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       int ret = 0;
-
-       if (!buffer->heap->ops->map_user) {
-               pr_err("%s: this heap does not define a method for mapping "
-                      "to userspace\n", __func__);
-               return -EINVAL;
-       }
-
-       if (ion_buffer_fault_user_mappings(buffer)) {
-               vma->vm_private_data = buffer;
-               vma->vm_ops = &ion_vma_ops;
-               ion_vm_open(vma);
-               return 0;
-       }
-
-       if (!(buffer->flags & ION_FLAG_CACHED))
-               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-       mutex_lock(&buffer->lock);
-       /* now map it to userspace */
-       ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
-       mutex_unlock(&buffer->lock);
-
-       if (ret)
-               pr_err("%s: failure mapping buffer to userspace\n",
-                      __func__);
-
-       return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       ion_buffer_put(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
-                              void *ptr)
-{
-       return;
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
-                                       enum dma_data_direction direction)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-       void *vaddr;
-
-       if (!buffer->heap->ops->map_kernel) {
-               pr_err("%s: map kernel is not implemented by this heap.\n",
-                      __func__);
-               return -ENODEV;
-       }
-
-       mutex_lock(&buffer->lock);
-       vaddr = ion_buffer_kmap_get(buffer);
-       mutex_unlock(&buffer->lock);
-       if (IS_ERR(vaddr))
-               return PTR_ERR(vaddr);
-       return 0;
-}
-
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
-                                      enum dma_data_direction direction)
-{
-       struct ion_buffer *buffer = dmabuf->priv;
-
-       mutex_lock(&buffer->lock);
-       ion_buffer_kmap_put(buffer);
-       mutex_unlock(&buffer->lock);
-}
-
-struct dma_buf_ops dma_buf_ops = {
-       .map_dma_buf = ion_map_dma_buf,
-       .unmap_dma_buf = ion_unmap_dma_buf,
-       .mmap = ion_mmap,
-       .release = ion_dma_buf_release,
-       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
-       .end_cpu_access = ion_dma_buf_end_cpu_access,
-       .kmap_atomic = ion_dma_buf_kmap,
-       .kunmap_atomic = ion_dma_buf_kunmap,
-       .kmap = ion_dma_buf_kmap,
-       .kunmap = ion_dma_buf_kunmap,
-};
-
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-                                               struct ion_handle *handle)
-{
-       struct ion_buffer *buffer;
-       struct dma_buf *dmabuf;
-       bool valid_handle;
-
-       mutex_lock(&client->lock);
-       valid_handle = ion_handle_validate(client, handle);
-       mutex_unlock(&client->lock);
-       if (!valid_handle) {
-               WARN(1, "%s: invalid handle passed to share.\n", __func__);
-               return ERR_PTR(-EINVAL);
-       }
-
-       buffer = handle->buffer;
-       ion_buffer_get(buffer);
-       dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
-       if (IS_ERR(dmabuf)) {
-               ion_buffer_put(buffer);
-               return dmabuf;
-       }
-
-       return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
-       struct dma_buf *dmabuf;
-       int fd;
-
-       dmabuf = ion_share_dma_buf(client, handle);
-       if (IS_ERR(dmabuf))
-               return PTR_ERR(dmabuf);
-
-       fd = dma_buf_fd(dmabuf, O_CLOEXEC);
-       if (fd < 0)
-               dma_buf_put(dmabuf);
-
-       return fd;
-}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
-{
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-       struct ion_handle *handle;
-       int ret;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf))
-               return ERR_PTR(PTR_ERR(dmabuf));
-       /* if this memory came from ion */
-
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not import dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return ERR_PTR(-EINVAL);
-       }
-       buffer = dmabuf->priv;
-
-       mutex_lock(&client->lock);
-       /* if a handle exists for this buffer just take a reference to it */
-       handle = ion_handle_lookup(client, buffer);
-       if (!IS_ERR(handle)) {
-               ion_handle_get(handle);
-               goto end;
-       }
-       handle = ion_handle_create(client, buffer);
-       if (IS_ERR(handle))
-               goto end;
-       ret = ion_handle_add(client, handle);
-       if (ret) {
-               ion_handle_put(handle);
-               handle = ERR_PTR(ret);
-       }
-end:
-       mutex_unlock(&client->lock);
-       dma_buf_put(dmabuf);
-       return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-static int ion_sync_for_device(struct ion_client *client, int fd)
-{
-       struct dma_buf *dmabuf;
-       struct ion_buffer *buffer;
-
-       dmabuf = dma_buf_get(fd);
-       if (IS_ERR(dmabuf))
-               return PTR_ERR(dmabuf);
-
-       /* if this memory came from ion */
-       if (dmabuf->ops != &dma_buf_ops) {
-               pr_err("%s: can not sync dmabuf from another exporter\n",
-                      __func__);
-               dma_buf_put(dmabuf);
-               return -EINVAL;
-       }
-       buffer = dmabuf->priv;
-
-       dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
-                              buffer->sg_table->nents, DMA_BIDIRECTIONAL);
-       dma_buf_put(dmabuf);
-       return 0;
-}
-
-static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       struct ion_client *client = filp->private_data;
-
-       switch (cmd) {
-       case ION_IOC_ALLOC:
-       {
-               struct ion_allocation_data data;
-               struct ion_handle *handle;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-                       return -EFAULT;
-               handle = ion_alloc(client, data.len, data.align,
-                                            data.heap_id_mask, data.flags);
-
-               if (IS_ERR(handle))
-                       return PTR_ERR(handle);
-
-               data.handle = handle->id;
-
-               if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-                       ion_free(client, handle);
-                       return -EFAULT;
-               }
-               break;
-       }
-       case ION_IOC_FREE:
-       {
-               struct ion_handle_data data;
-               struct ion_handle *handle;
-
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_handle_data)))
-                       return -EFAULT;
-               mutex_lock(&client->lock);
-               handle = ion_uhandle_get(client, data.handle);
-               mutex_unlock(&client->lock);
-               if (!handle)
-                       return -EINVAL;
-               ion_free(client, handle);
-               break;
-       }
-       case ION_IOC_SHARE:
-       case ION_IOC_MAP:
-       {
-               struct ion_fd_data data;
-               struct ion_handle *handle;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-                       return -EFAULT;
-               handle = ion_uhandle_get(client, data.handle);
-               data.fd = ion_share_dma_buf_fd(client, handle);
-               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
-                       return -EFAULT;
-               if (data.fd < 0)
-                       return data.fd;
-               break;
-       }
-       case ION_IOC_IMPORT:
-       {
-               struct ion_fd_data data;
-               struct ion_handle *handle;
-               int ret = 0;
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               handle = ion_import_dma_buf(client, data.fd);
-               if (IS_ERR(handle))
-                       ret = PTR_ERR(handle);
-               else
-                       data.handle = handle->id;
-
-               if (copy_to_user((void __user *)arg, &data,
-                                sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               if (ret < 0)
-                       return ret;
-               break;
-       }
-       case ION_IOC_SYNC:
-       {
-               struct ion_fd_data data;
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               ion_sync_for_device(client, data.fd);
-               break;
-       }
-       case ION_IOC_CUSTOM:
-       {
-               struct ion_device *dev = client->dev;
-               struct ion_custom_data data;
-
-               if (!dev->custom_ioctl)
-                       return -ENOTTY;
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(struct ion_custom_data)))
-                       return -EFAULT;
-               return dev->custom_ioctl(client, data.cmd, data.arg);
-       }
-       default:
-               return -ENOTTY;
-       }
-       return 0;
-}
-
-static int ion_release(struct inode *inode, struct file *file)
-{
-       struct ion_client *client = file->private_data;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       ion_client_destroy(client);
-       return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
-       struct miscdevice *miscdev = file->private_data;
-       struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
-       struct ion_client *client;
-
-       pr_debug("%s: %d\n", __func__, __LINE__);
-       client = ion_client_create(dev, "user");
-       if (IS_ERR(client))
-               return PTR_ERR(client);
-       file->private_data = client;
-
-       return 0;
-}
-
-static const struct file_operations ion_fops = {
-       .owner          = THIS_MODULE,
-       .open           = ion_open,
-       .release        = ion_release,
-       .unlocked_ioctl = ion_ioctl,
-       .compat_ioctl   = compat_ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
-                                  unsigned int id)
-{
-       size_t size = 0;
-       struct rb_node *n;
-
-       mutex_lock(&client->lock);
-       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
-               struct ion_handle *handle = rb_entry(n,
-                                                    struct ion_handle,
-                                                    node);
-               if (handle->buffer->heap->id == id)
-                       size += handle->buffer->size;
-       }
-       mutex_unlock(&client->lock);
-       return size;
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
-       struct ion_heap *heap = s->private;
-       struct ion_device *dev = heap->dev;
-       struct rb_node *n;
-       size_t total_size = 0;
-       size_t total_orphaned_size = 0;
-
-       seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
-       seq_printf(s, "----------------------------------------------------\n");
-
-       for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
-               struct ion_client *client = rb_entry(n, struct ion_client,
-                                                    node);
-               size_t size = ion_debug_heap_total(client, heap->id);
-               if (!size)
-                       continue;
-               if (client->task) {
-                       char task_comm[TASK_COMM_LEN];
-
-                       get_task_comm(task_comm, client->task);
-                       seq_printf(s, "%16.s %16u %16u\n", task_comm,
-                                  client->pid, size);
-               } else {
-                       seq_printf(s, "%16.s %16u %16u\n", client->name,
-                                  client->pid, size);
-               }
-       }
-       seq_printf(s, "----------------------------------------------------\n");
-       seq_printf(s, "orphaned allocations (info is from last known client):"
-                  "\n");
-       mutex_lock(&dev->buffer_lock);
-       for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
-               struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
-                                                    node);
-               if (buffer->heap->id != heap->id)
-                       continue;
-               total_size += buffer->size;
-               if (!buffer->handle_count) {
-                       seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
-                                  buffer->pid, buffer->size, buffer->kmap_cnt,
-                                  atomic_read(&buffer->ref.refcount));
-                       total_orphaned_size += buffer->size;
-               }
-       }
-       mutex_unlock(&dev->buffer_lock);
-       seq_printf(s, "----------------------------------------------------\n");
-       seq_printf(s, "%16.s %16u\n", "total orphaned",
-                  total_orphaned_size);
-       seq_printf(s, "%16.s %16u\n", "total ", total_size);
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               seq_printf(s, "%16.s %16u\n", "deferred free",
-                               heap->free_list_size);
-       seq_printf(s, "----------------------------------------------------\n");
-
-       if (heap->debug_show)
-               heap->debug_show(heap, s, unused);
-
-       return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
-       .open = ion_debug_heap_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-#ifdef DEBUG_HEAP_SHRINKER
-static int debug_shrink_set(void *data, u64 val)
-{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
-
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
-
-        if (!val)
-                return 0;
-
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        sc.nr_to_scan = objs;
-
-        heap->shrinker.shrink(&heap->shrinker, &sc);
-        return 0;
-}
-
-static int debug_shrink_get(void *data, u64 *val)
-{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
-
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
-
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        *val = objs;
-        return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
-                        debug_shrink_set, "%llu\n");
-#endif
-
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
-{
-       if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-           !heap->ops->unmap_dma)
-               pr_err("%s: can not add heap with invalid ops struct.\n",
-                      __func__);
-
-       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-               ion_heap_init_deferred_free(heap);
-
-       heap->dev = dev;
-       down_write(&dev->lock);
-       /* use negative heap->id to reverse the priority -- when traversing
-          the list later attempt higher id numbers first */
-       plist_node_init(&heap->node, -heap->id);
-       plist_add(&heap->node, &dev->heaps);
-       debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
-                           &debug_heap_fops);
-#ifdef DEBUG_HEAP_SHRINKER
-       if (heap->shrinker.shrink) {
-               char debug_name[64];
-
-               snprintf(debug_name, 64, "%s_shrink", heap->name);
-               debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
-                                   &debug_shrink_fops);
-       }
-#endif
-       up_write(&dev->lock);
-}
-
-struct ion_device *ion_device_create(long (*custom_ioctl)
-                                    (struct ion_client *client,
-                                     unsigned int cmd,
-                                     unsigned long arg))
-{
-       struct ion_device *idev;
-       int ret;
-
-       idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
-       if (!idev)
-               return ERR_PTR(-ENOMEM);
-
-       idev->dev.minor = MISC_DYNAMIC_MINOR;
-       idev->dev.name = "ion";
-       idev->dev.fops = &ion_fops;
-       idev->dev.parent = NULL;
-       ret = misc_register(&idev->dev);
-       if (ret) {
-               pr_err("ion: failed to register misc device.\n");
-               return ERR_PTR(ret);
-       }
-
-       idev->debug_root = debugfs_create_dir("ion", NULL);
-       if (!idev->debug_root)
-               pr_err("ion: failed to create debug files.\n");
-
-       idev->custom_ioctl = custom_ioctl;
-       idev->buffers = RB_ROOT;
-       mutex_init(&idev->buffer_lock);
-       init_rwsem(&idev->lock);
-       plist_head_init(&idev->heaps);
-       idev->clients = RB_ROOT;
-       return idev;
-}
-
-void ion_device_destroy(struct ion_device *dev)
-{
-       misc_deregister(&dev->dev);
-       /* XXX need to free the heaps and clients ? */
-       kfree(dev);
-}
-
-void __init ion_reserve(struct ion_platform_data *data)
-{
-       int i;
-
-       for (i = 0; i < data->nr; i++) {
-               if (data->heaps[i].size == 0)
-                       continue;
-
-               if (data->heaps[i].base == 0) {
-                       phys_addr_t paddr;
-                       paddr = memblock_alloc_base(data->heaps[i].size,
-                                                   data->heaps[i].align,
-                                                   MEMBLOCK_ALLOC_ANYWHERE);
-                       if (!paddr) {
-                               pr_err("%s: error allocating memblock for "
-                                      "heap %d\n",
-                                       __func__, i);
-                               continue;
-                       }
-                       data->heaps[i].base = paddr;
-               } else {
-                       int ret = memblock_reserve(data->heaps[i].base,
-                                              data->heaps[i].size);
-                       if (ret)
-                               pr_err("memblock reserve of %x@%lx failed\n",
-                                      data->heaps[i].size,
-                                      data->heaps[i].base);
-               }
-               pr_info("%s: %s reserved base %lx size %d\n", __func__,
-                       data->heaps[i].name,
-                       data->heaps[i].base,
-                       data->heaps[i].size);
-       }
-}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
deleted file mode 100644 (file)
index 86f3554..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * drivers/gpu/ion/ion_carveout_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/spinlock.h>
-
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-#include <asm/mach/map.h>
-
-struct ion_carveout_heap {
-       struct ion_heap heap;
-       struct gen_pool *pool;
-       ion_phys_addr_t base;
-};
-
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
-                                     unsigned long size,
-                                     unsigned long align)
-{
-       struct ion_carveout_heap *carveout_heap =
-               container_of(heap, struct ion_carveout_heap, heap);
-       unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
-
-       if (!offset)
-               return ION_CARVEOUT_ALLOCATE_FAIL;
-
-       return offset;
-}
-
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-                      unsigned long size)
-{
-       struct ion_carveout_heap *carveout_heap =
-               container_of(heap, struct ion_carveout_heap, heap);
-
-       if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
-               return;
-       gen_pool_free(carveout_heap->pool, addr, size);
-}
-
-static int ion_carveout_heap_phys(struct ion_heap *heap,
-                                 struct ion_buffer *buffer,
-                                 ion_phys_addr_t *addr, size_t *len)
-{
-       *addr = buffer->priv_phys;
-       *len = buffer->size;
-       return 0;
-}
-
-static int ion_carveout_heap_allocate(struct ion_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long size, unsigned long align,
-                                     unsigned long flags)
-{
-       buffer->priv_phys = ion_carveout_allocate(heap, size, align);
-       return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-
-       ion_carveout_free(heap, buffer->priv_phys, buffer->size);
-       buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-                                             struct ion_buffer *buffer)
-{
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-                                struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-}
-
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
-                                  struct ion_buffer *buffer)
-{
-       void *ret;
-       int mtype = MT_MEMORY_NONCACHED;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               mtype = MT_MEMORY;
-
-       ret = __arm_ioremap(buffer->priv_phys, buffer->size,
-                             mtype);
-       if (ret == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       return ret;
-}
-
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
-                                   struct ion_buffer *buffer)
-{
-       __arm_iounmap(buffer->vaddr);
-       buffer->vaddr = NULL;
-       return;
-}
-
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                              struct vm_area_struct *vma)
-{
-       return remap_pfn_range(vma, vma->vm_start,
-                              __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              pgprot_noncached(vma->vm_page_prot));
-}
-
-static struct ion_heap_ops carveout_heap_ops = {
-       .allocate = ion_carveout_heap_allocate,
-       .free = ion_carveout_heap_free,
-       .phys = ion_carveout_heap_phys,
-       .map_dma = ion_carveout_heap_map_dma,
-       .unmap_dma = ion_carveout_heap_unmap_dma,
-       .map_user = ion_carveout_heap_map_user,
-       .map_kernel = ion_carveout_heap_map_kernel,
-       .unmap_kernel = ion_carveout_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_carveout_heap *carveout_heap;
-
-       carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
-       if (!carveout_heap)
-               return ERR_PTR(-ENOMEM);
-
-       carveout_heap->pool = gen_pool_create(12, -1);
-       if (!carveout_heap->pool) {
-               kfree(carveout_heap);
-               return ERR_PTR(-ENOMEM);
-       }
-       carveout_heap->base = heap_data->base;
-       gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
-                    -1);
-       carveout_heap->heap.ops = &carveout_heap_ops;
-       carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
-
-       return &carveout_heap->heap;
-}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_carveout_heap *carveout_heap =
-            container_of(heap, struct  ion_carveout_heap, heap);
-
-       gen_pool_destroy(carveout_heap->pool);
-       kfree(carveout_heap);
-       carveout_heap = NULL;
-}
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
deleted file mode 100644 (file)
index cd01aad..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * drivers/gpu/ion/ion_chunk_heap.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-//#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/genalloc.h>
-#include <linux/io.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-#include <asm/mach/map.h>
-
-struct ion_chunk_heap {
-       struct ion_heap heap;
-       struct gen_pool *pool;
-       ion_phys_addr_t base;
-       unsigned long chunk_size;
-       unsigned long size;
-       unsigned long allocated;
-};
-
-static int ion_chunk_heap_allocate(struct ion_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long size, unsigned long align,
-                                     unsigned long flags)
-{
-       struct ion_chunk_heap *chunk_heap =
-               container_of(heap, struct ion_chunk_heap, heap);
-       struct sg_table *table;
-       struct scatterlist *sg;
-       int ret, i;
-       unsigned long num_chunks;
-       unsigned long allocated_size;
-
-       if (ion_buffer_fault_user_mappings(buffer))
-               return -ENOMEM;
-
-       allocated_size = ALIGN(size, chunk_heap->chunk_size);
-       num_chunks = allocated_size / chunk_heap->chunk_size;
-
-       if (allocated_size > chunk_heap->size - chunk_heap->allocated)
-               return -ENOMEM;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return -ENOMEM;
-       ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ret;
-       }
-
-       sg = table->sgl;
-       for (i = 0; i < num_chunks; i++) {
-               unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
-                                                    chunk_heap->chunk_size);
-               if (!paddr)
-                       goto err;
-               sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
-               sg = sg_next(sg);
-       }
-
-       buffer->priv_virt = table;
-       chunk_heap->allocated += allocated_size;
-       return 0;
-err:
-       sg = table->sgl;
-       for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-                             sg_dma_len(sg));
-               sg = sg_next(sg);
-       }
-       sg_free_table(table);
-       kfree(table);
-       return -ENOMEM;
-}
-
-static void ion_chunk_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-       struct ion_chunk_heap *chunk_heap =
-               container_of(heap, struct ion_chunk_heap, heap);
-       struct sg_table *table = buffer->priv_virt;
-       struct scatterlist *sg;
-       int i;
-       unsigned long allocated_size;
-
-       allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
-
-       ion_heap_buffer_zero(buffer);
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               if (ion_buffer_cached(buffer))
-                       arm_dma_ops.sync_single_for_device(NULL,
-                               pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
-                               sg_dma_len(sg), DMA_BIDIRECTIONAL);
-               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-                             sg_dma_len(sg));
-       }
-       chunk_heap->allocated -= allocated_size;
-       sg_free_table(table);
-       kfree(table);
-}
-
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       return buffer->priv_virt;
-}
-
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-static struct ion_heap_ops chunk_heap_ops = {
-       .allocate = ion_chunk_heap_allocate,
-       .free = ion_chunk_heap_free,
-       .map_dma = ion_chunk_heap_map_dma,
-       .unmap_dma = ion_chunk_heap_unmap_dma,
-       .map_user = ion_heap_map_user,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-};
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_chunk_heap *chunk_heap;
-       struct vm_struct *vm_struct;
-       pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
-       int i, ret;
-
-
-       chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
-       if (!chunk_heap)
-               return ERR_PTR(-ENOMEM);
-
-       chunk_heap->chunk_size = (unsigned long)heap_data->priv;
-       chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
-                                          PAGE_SHIFT, -1);
-       if (!chunk_heap->pool) {
-               ret = -ENOMEM;
-               goto error_gen_pool_create;
-       }
-       chunk_heap->base = heap_data->base;
-       chunk_heap->size = heap_data->size;
-       chunk_heap->allocated = 0;
-
-       vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!vm_struct) {
-               ret = -ENOMEM;
-               goto error;
-       }
-       for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
-               struct page *page = phys_to_page(chunk_heap->base + i);
-               struct page **pages = &page;
-
-               ret = map_vm_area(vm_struct, pgprot, &pages);
-               if (ret)
-                       goto error_map_vm_area;
-               memset(vm_struct->addr, 0, PAGE_SIZE);
-               unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
-       }
-       free_vm_area(vm_struct);
-
-       arm_dma_ops.sync_single_for_device(NULL,
-               pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
-               heap_data->size, DMA_BIDIRECTIONAL);
-       gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
-       chunk_heap->heap.ops = &chunk_heap_ops;
-       chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
-       chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-       pr_info("%s: base %lu size %zu align %ld\n", __func__,
-                         chunk_heap->base, heap_data->size, heap_data->align);
-
-       return &chunk_heap->heap;
-
-error_map_vm_area:
-       free_vm_area(vm_struct);
-error:
-       gen_pool_destroy(chunk_heap->pool);
-error_gen_pool_create:
-       kfree(chunk_heap);
-       return ERR_PTR(ret);
-}
-
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_chunk_heap *chunk_heap =
-            container_of(heap, struct  ion_chunk_heap, heap);
-
-       gen_pool_destroy(chunk_heap->pool);
-       kfree(chunk_heap);
-       chunk_heap = NULL;
-}
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
deleted file mode 100644 (file)
index 1eaa8c1..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * drivers/gpu/ion/ion_cma_heap.c
- *
- * Copyright (C) Linaro 2012
- * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/ion.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-
-/* for ion_heap_ops structure */
-#include "ion_priv.h"
-
-#define ION_CMA_ALLOCATE_FAILED -1
-
-struct ion_cma_heap {
-       struct ion_heap heap;
-       struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
-struct ion_cma_buffer_info {
-       void *cpu_addr;
-       dma_addr_t handle;
-       struct sg_table *table;
-};
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- * This function could be replaced by dma_common_get_sgtable
- * as soon as it will avalaible.
- */
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
-                       void *cpu_addr, dma_addr_t handle, size_t size)
-{
-       struct page *page = virt_to_page(cpu_addr);
-       int ret;
-
-       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-       if (unlikely(ret))
-               return ret;
-
-       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return 0;
-}
-
-/* ION CMA heap operations functions */
-static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
-                           unsigned long len, unsigned long align,
-                           unsigned long flags)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info;
-
-       dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
-       info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
-       if (!info) {
-               dev_err(dev, "Can't allocate buffer info\n");
-               return ION_CMA_ALLOCATE_FAILED;
-       }
-
-       info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
-
-       if (!info->cpu_addr) {
-               dev_err(dev, "Fail to allocate buffer\n");
-               goto err;
-       }
-
-       info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!info->table) {
-               dev_err(dev, "Fail to allocate sg table\n");
-               goto free_mem;
-       }
-
-       if (ion_cma_get_sgtable
-           (dev, info->table, info->cpu_addr, info->handle, len))
-               goto free_table;
-       /* keep this for memory release */
-       buffer->priv_virt = info;
-       dev_dbg(dev, "Allocate buffer %p\n", buffer);
-       return 0;
-
-free_table:
-       kfree(info->table);
-free_mem:
-       dma_free_coherent(dev, len, info->cpu_addr, info->handle);
-err:
-       kfree(info);
-       return ION_CMA_ALLOCATE_FAILED;
-}
-
-static void ion_cma_free(struct ion_buffer *buffer)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       dev_dbg(dev, "Release buffer %p\n", buffer);
-       /* release memory */
-       dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
-       /* release sg table */
-       sg_free_table(info->table);
-       kfree(info->table);
-       kfree(info);
-}
-
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
-                       ion_phys_addr_t *addr, size_t *len)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
-               info->handle);
-
-       *addr = info->handle;
-       *len = buffer->size;
-
-       return 0;
-}
-
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       return info->table;
-}
-
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
-                       struct vm_area_struct *vma)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-       struct device *dev = cma_heap->dev;
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-       return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
-                                buffer->size);
-}
-
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
-{
-       struct ion_cma_buffer_info *info = buffer->priv_virt;
-       /* kernel memory mapping has been done at allocation time */
-       return info->cpu_addr;
-}
-
-static struct ion_heap_ops ion_cma_ops = {
-       .allocate = ion_cma_allocate,
-       .free = ion_cma_free,
-       .map_dma = ion_cma_heap_map_dma,
-       .unmap_dma = ion_cma_heap_unmap_dma,
-       .phys = ion_cma_phys,
-       .map_user = ion_cma_mmap,
-       .map_kernel = ion_cma_map_kernel,
-};
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
-{
-       struct ion_cma_heap *cma_heap;
-
-       cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
-
-       if (!cma_heap)
-               return ERR_PTR(-ENOMEM);
-
-       cma_heap->heap.ops = &ion_cma_ops;
-       /* get device from private heaps data, later it will be
-        * used to make the link with reserved CMA memory */
-       cma_heap->dev = data->priv;
-       cma_heap->heap.type = ION_HEAP_TYPE_DMA;
-       return &cma_heap->heap;
-}
-
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
-       kfree(cma_heap);
-}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
deleted file mode 100644 (file)
index 786302d..0000000
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * drivers/gpu/ion/ion_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/ion.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/rtmutex.h>
-#include <linux/sched.h>
-#include <linux/scatterlist.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-void *ion_heap_map_kernel(struct ion_heap *heap,
-                         struct ion_buffer *buffer)
-{
-       struct scatterlist *sg;
-       int i, j;
-       void *vaddr;
-       pgprot_t pgprot;
-       struct sg_table *table = buffer->sg_table;
-       int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-       struct page **pages = vmalloc(sizeof(struct page *) * npages);
-       struct page **tmp = pages;
-
-       if (!pages)
-               return 0;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               pgprot = PAGE_KERNEL;
-       else
-               pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
-               struct page *page = sg_page(sg);
-               BUG_ON(i >= npages);
-               for (j = 0; j < npages_this_entry; j++) {
-                       *(tmp++) = page++;
-               }
-       }
-       vaddr = vmap(pages, npages, VM_MAP, pgprot);
-       vfree(pages);
-
-       if (vaddr == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       return vaddr;
-}
-
-void ion_heap_unmap_kernel(struct ion_heap *heap,
-                          struct ion_buffer *buffer)
-{
-       vunmap(buffer->vaddr);
-}
-
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                     struct vm_area_struct *vma)
-{
-       struct sg_table *table = buffer->sg_table;
-       unsigned long addr = vma->vm_start;
-       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               struct page *page = sg_page(sg);
-               unsigned long remainder = vma->vm_end - addr;
-               unsigned long len = sg_dma_len(sg);
-
-               if (offset >= sg_dma_len(sg)) {
-                       offset -= sg_dma_len(sg);
-                       continue;
-               } else if (offset) {
-                       page += offset / PAGE_SIZE;
-                       len = sg_dma_len(sg) - offset;
-                       offset = 0;
-               }
-               len = min(len, remainder);
-               remap_pfn_range(vma, addr, page_to_pfn(page), len,
-                               vma->vm_page_prot);
-               addr += len;
-               if (addr >= vma->vm_end)
-                       return 0;
-       }
-       return 0;
-}
-
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
-{
-       struct sg_table *table = buffer->sg_table;
-       pgprot_t pgprot;
-       struct scatterlist *sg;
-       struct vm_struct *vm_struct;
-       int i, j, ret = 0;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               pgprot = PAGE_KERNEL;
-       else
-               pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-       vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!vm_struct)
-               return -ENOMEM;
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               struct page *page = sg_page(sg);
-               unsigned long len = sg_dma_len(sg);
-
-               for (j = 0; j < len / PAGE_SIZE; j++) {
-                       struct page *sub_page = page + j;
-                       struct page **pages = &sub_page;
-                       ret = map_vm_area(vm_struct, pgprot, &pages);
-                       if (ret)
-                               goto end;
-                       memset(vm_struct->addr, 0, PAGE_SIZE);
-                       unmap_kernel_range((unsigned long)vm_struct->addr,
-                                          PAGE_SIZE);
-               }
-       }
-end:
-       free_vm_area(vm_struct);
-       return ret;
-}
-
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
-                                 unsigned int order)
-{
-       struct page *page = alloc_pages(gfp_flags, order);
-
-       if (!page)
-               return page;
-
-       if (ion_buffer_fault_user_mappings(buffer))
-               split_page(page, order);
-
-       return page;
-}
-
-void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
-                        unsigned int order)
-{
-       int i;
-
-       if (!ion_buffer_fault_user_mappings(buffer)) {
-               __free_pages(page, order);
-               return;
-       }
-       for (i = 0; i < (1 << order); i++)
-               __free_page(page + i);
-}
-
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
-{
-       rt_mutex_lock(&heap->lock);
-       list_add(&buffer->list, &heap->free_list);
-       heap->free_list_size += buffer->size;
-       rt_mutex_unlock(&heap->lock);
-       wake_up(&heap->waitqueue);
-}
-
-size_t ion_heap_freelist_size(struct ion_heap *heap)
-{
-       size_t size;
-
-       rt_mutex_lock(&heap->lock);
-       size = heap->free_list_size;
-       rt_mutex_unlock(&heap->lock);
-
-       return size;
-}
-
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
-{
-       struct ion_buffer *buffer, *tmp;
-       size_t total_drained = 0;
-
-       if (ion_heap_freelist_size(heap) == 0)
-               return 0;
-
-       rt_mutex_lock(&heap->lock);
-       if (size == 0)
-               size = heap->free_list_size;
-
-       list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
-               if (total_drained >= size)
-                       break;
-               list_del(&buffer->list);
-               ion_buffer_destroy(buffer);
-               heap->free_list_size -= buffer->size;
-               total_drained += buffer->size;
-       }
-       rt_mutex_unlock(&heap->lock);
-
-       return total_drained;
-}
-
-int ion_heap_deferred_free(void *data)
-{
-       struct ion_heap *heap = data;
-
-       while (true) {
-               struct ion_buffer *buffer;
-
-               wait_event_freezable(heap->waitqueue,
-                                    ion_heap_freelist_size(heap) > 0);
-
-               rt_mutex_lock(&heap->lock);
-               if (list_empty(&heap->free_list)) {
-                       rt_mutex_unlock(&heap->lock);
-                       continue;
-               }
-               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
-                                         list);
-               list_del(&buffer->list);
-               heap->free_list_size -= buffer->size;
-               rt_mutex_unlock(&heap->lock);
-               ion_buffer_destroy(buffer);
-       }
-
-       return 0;
-}
-
-int ion_heap_init_deferred_free(struct ion_heap *heap)
-{
-       struct sched_param param = { .sched_priority = 0 };
-
-       INIT_LIST_HEAD(&heap->free_list);
-       heap->free_list_size = 0;
-       rt_mutex_init(&heap->lock);
-       init_waitqueue_head(&heap->waitqueue);
-       heap->task = kthread_run(ion_heap_deferred_free, heap,
-                                "%s", heap->name);
-       sched_setscheduler(heap->task, SCHED_IDLE, &param);
-       if (IS_ERR(heap->task)) {
-               pr_err("%s: creating thread for deferred free failed\n",
-                      __func__);
-               return PTR_RET(heap->task);
-       }
-       return 0;
-}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
-       struct ion_heap *heap = NULL;
-
-       switch (heap_data->type) {
-       case ION_HEAP_TYPE_SYSTEM_CONTIG:
-               heap = ion_system_contig_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_SYSTEM:
-               heap = ion_system_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_CARVEOUT:
-               heap = ion_carveout_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_CHUNK:
-               heap = ion_chunk_heap_create(heap_data);
-               break;
-       case ION_HEAP_TYPE_DMA:
-               heap = ion_cma_heap_create(heap_data);
-               break;
-       default:
-               pr_err("%s: Invalid heap type %d\n", __func__,
-                      heap_data->type);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (IS_ERR_OR_NULL(heap)) {
-               pr_err("%s: error creating heap %s type %d base %lu size %u\n",
-                      __func__, heap_data->name, heap_data->type,
-                      heap_data->base, heap_data->size);
-               return ERR_PTR(-EINVAL);
-       }
-
-       heap->name = heap_data->name;
-       heap->id = heap_data->id;
-       return heap;
-}
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
-       if (!heap)
-               return;
-
-       switch (heap->type) {
-       case ION_HEAP_TYPE_SYSTEM_CONTIG:
-               ion_system_contig_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_SYSTEM:
-               ion_system_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_CARVEOUT:
-               ion_carveout_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_CHUNK:
-               ion_chunk_heap_destroy(heap);
-               break;
-       case ION_HEAP_TYPE_DMA:
-               ion_cma_heap_destroy(heap);
-               break;
-       default:
-               pr_err("%s: Invalid heap type %d\n", __func__,
-                      heap->type);
-       }
-}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
deleted file mode 100644 (file)
index 7e00f51..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * drivers/gpu/ion/ion_mem_pool.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "ion_priv.h"
-
-struct ion_page_pool_item {
-       struct page *page;
-       struct list_head list;
-};
-
-static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
-{
-       struct page *page = alloc_pages(pool->gfp_mask, pool->order);
-
-       if (!page)
-               return NULL;
-       /* this is only being used to flush the page for dma,
-          this api is not really suitable for calling from a driver
-          but no better way to flush a page for dma exist at this time */
-       arm_dma_ops.sync_single_for_device(NULL,
-                                          pfn_to_dma(NULL, page_to_pfn(page)),
-                                          PAGE_SIZE << pool->order,
-                                          DMA_BIDIRECTIONAL);
-       return page;
-}
-
-static void ion_page_pool_free_pages(struct ion_page_pool *pool,
-                                    struct page *page)
-{
-       __free_pages(page, pool->order);
-}
-
-static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
-{
-       struct ion_page_pool_item *item;
-
-       item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
-       if (!item)
-               return -ENOMEM;
-
-       mutex_lock(&pool->mutex);
-       item->page = page;
-       if (PageHighMem(page)) {
-               list_add_tail(&item->list, &pool->high_items);
-               pool->high_count++;
-       } else {
-               list_add_tail(&item->list, &pool->low_items);
-               pool->low_count++;
-       }
-       mutex_unlock(&pool->mutex);
-       return 0;
-}
-
-static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
-{
-       struct ion_page_pool_item *item;
-       struct page *page;
-
-       if (high) {
-               BUG_ON(!pool->high_count);
-               item = list_first_entry(&pool->high_items,
-                                       struct ion_page_pool_item, list);
-               pool->high_count--;
-       } else {
-               BUG_ON(!pool->low_count);
-               item = list_first_entry(&pool->low_items,
-                                       struct ion_page_pool_item, list);
-               pool->low_count--;
-       }
-
-       list_del(&item->list);
-       page = item->page;
-       kfree(item);
-       return page;
-}
-
-void *ion_page_pool_alloc(struct ion_page_pool *pool)
-{
-       struct page *page = NULL;
-
-       BUG_ON(!pool);
-
-       mutex_lock(&pool->mutex);
-       if (pool->high_count)
-               page = ion_page_pool_remove(pool, true);
-       else if (pool->low_count)
-               page = ion_page_pool_remove(pool, false);
-       mutex_unlock(&pool->mutex);
-
-       if (!page)
-               page = ion_page_pool_alloc_pages(pool);
-
-       return page;
-}
-
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
-{
-       int ret;
-
-       ret = ion_page_pool_add(pool, page);
-       if (ret)
-               ion_page_pool_free_pages(pool, page);
-}
-
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
-{
-       int total = 0;
-
-       total += high ? (pool->high_count + pool->low_count) *
-               (1 << pool->order) :
-                       pool->low_count * (1 << pool->order);
-       return total;
-}
-
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-                               int nr_to_scan)
-{
-       int nr_freed = 0;
-       int i;
-       bool high;
-
-       high = gfp_mask & __GFP_HIGHMEM;
-
-       if (nr_to_scan == 0)
-               return ion_page_pool_total(pool, high);
-
-       for (i = 0; i < nr_to_scan; i++) {
-               struct page *page;
-
-               mutex_lock(&pool->mutex);
-               if (high && pool->high_count) {
-                       page = ion_page_pool_remove(pool, true);
-               } else if (pool->low_count) {
-                       page = ion_page_pool_remove(pool, false);
-               } else {
-                       mutex_unlock(&pool->mutex);
-                       break;
-               }
-               mutex_unlock(&pool->mutex);
-               ion_page_pool_free_pages(pool, page);
-               nr_freed += (1 << pool->order);
-       }
-
-       return nr_freed;
-}
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
-{
-       struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
-                                            GFP_KERNEL);
-       if (!pool)
-               return NULL;
-       pool->high_count = 0;
-       pool->low_count = 0;
-       INIT_LIST_HEAD(&pool->low_items);
-       INIT_LIST_HEAD(&pool->high_items);
-       pool->gfp_mask = gfp_mask;
-       pool->order = order;
-       mutex_init(&pool->mutex);
-       plist_node_init(&pool->list, order);
-
-       return pool;
-}
-
-void ion_page_pool_destroy(struct ion_page_pool *pool)
-{
-       kfree(pool);
-}
-
-static int __init ion_page_pool_init(void)
-{
-       return 0;
-}
-
-static void __exit ion_page_pool_exit(void)
-{
-}
-
-module_init(ion_page_pool_init);
-module_exit(ion_page_pool_exit);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
deleted file mode 100644 (file)
index 32461e9..0000000
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * drivers/gpu/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/ion.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-
-struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref:               refernce count
- * @node:              node in the ion_device buffers tree
- * @dev:               back pointer to the ion_device
- * @heap:              back pointer to the heap the buffer came from
- * @flags:             buffer specific flags
- * @size:              size of the buffer
- * @priv_virt:         private data to the buffer representable as
- *                     a void *
- * @priv_phys:         private data to the buffer representable as
- *                     an ion_phys_addr_t (and someday a phys_addr_t)
- * @lock:              protects the buffers cnt fields
- * @kmap_cnt:          number of times the buffer is mapped to the kernel
- * @vaddr:             the kenrel mapping if kmap_cnt is not zero
- * @dmap_cnt:          number of times the buffer is mapped for dma
- * @sg_table:          the sg table for the buffer if dmap_cnt is not zero
- * @pages:             flat array of pages in the buffer -- used by fault
- *                     handler and only valid for buffers that are faulted in
- * @vmas:              list of vma's mapping this buffer
- * @handle_count:      count of handles referencing this buffer
- * @task_comm:         taskcomm of last client to reference this buffer in a
- *                     handle, used for debugging
- * @pid:               pid of last client to reference this buffer in a
- *                     handle, used for debugging
-*/
-struct ion_buffer {
-       struct kref ref;
-       union {
-               struct rb_node node;
-               struct list_head list;
-       };
-       struct ion_device *dev;
-       struct ion_heap *heap;
-       unsigned long flags;
-       size_t size;
-       union {
-               void *priv_virt;
-               ion_phys_addr_t priv_phys;
-       };
-       struct mutex lock;
-       int kmap_cnt;
-       void *vaddr;
-       int dmap_cnt;
-       struct sg_table *sg_table;
-       struct page **pages;
-       struct list_head vmas;
-       /* used to track orphaned buffers */
-       int handle_count;
-       char task_comm[TASK_COMM_LEN];
-       pid_t pid;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate:          allocate memory
- * @free:              free memory
- * @phys               get physical address of a buffer (only define on
- *                     physically contiguous heaps)
- * @map_dma            map the memory for dma to a scatterlist
- * @unmap_dma          unmap the memory for dma
- * @map_kernel         map memory to the kernel
- * @unmap_kernel       unmap memory to the kernel
- * @map_user           map memory to userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
- */
-struct ion_heap_ops {
-       int (*allocate) (struct ion_heap *heap,
-                        struct ion_buffer *buffer, unsigned long len,
-                        unsigned long align, unsigned long flags);
-       void (*free) (struct ion_buffer *buffer);
-       int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
-                    ion_phys_addr_t *addr, size_t *len);
-       struct sg_table *(*map_dma) (struct ion_heap *heap,
-                                       struct ion_buffer *buffer);
-       void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
-       void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
-       void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
-       int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
-                        struct vm_area_struct *vma);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node:              rb node to put the heap on the device's tree of heaps
- * @dev:               back pointer to the ion_device
- * @type:              type of heap
- * @ops:               ops struct as above
- * @flags:             flags
- * @id:                        id of heap, also indicates priority of this heap when
- *                     allocating.  These are specified by platform data and
- *                     MUST be unique
- * @name:              used for debugging
- * @shrinker:          a shrinker for the heap, if the heap caches system
- *                     memory, it must define a shrinker to return it on low
- *                     memory conditions, this includes system memory cached
- *                     in the deferred free lists for heaps that support it
- * @free_list:         free list head if deferred free is used
- * @free_list_size     size of the deferred free list in bytes
- * @lock:              protects the free list
- * @waitqueue:         queue to wait on from deferred free thread
- * @task:              task struct of deferred free thread
- * @debug_show:                called when heap debug file is read to add any
- *                     heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made.  In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
-       struct plist_node node;
-       struct ion_device *dev;
-       enum ion_heap_type type;
-       struct ion_heap_ops *ops;
-       unsigned long flags;
-       unsigned int id;
-       const char *name;
-       struct shrinker shrinker;
-       struct list_head free_list;
-       size_t free_list_size;
-       struct rt_mutex lock;
-       wait_queue_head_t waitqueue;
-       struct task_struct *task;
-       int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer:            buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer:            buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl:      arch specific ioctl function if applicable
- *
- * returns a valid device or -PTR_ERR
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
-                                    (struct ion_client *client,
-                                     unsigned int cmd,
-                                     unsigned long arg));
-
-/**
- * ion_device_destroy - free and device and it's resource
- * @dev:               the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev:               the device
- * @heap:              the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
-void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
-int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
-                       struct vm_area_struct *);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-
-/**
- * ion_heap_alloc_pages - allocate pages from alloc_pages
- * @buffer:            the buffer to allocate for, used to extract the flags
- * @gfp_flags:         the gfp_t for the allocation
- * @order:             the order of the allocatoin
- *
- * This funciton allocations from alloc pages and also does any other
- * necessary operations based on the buffer->flags.  For buffers which
- * will be faulted in the pages are split using split_page
- */
-struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
-                                 unsigned int order);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap:              the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to setup deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap:              the heap
- * @buffer:            the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap:              the heap
- * @size:              ammount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed.  The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap:              the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *);
-void ion_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
-void ion_system_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
-void ion_system_contig_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
-void ion_carveout_heap_destroy(struct ion_heap *);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
-void ion_chunk_heap_destroy(struct ion_heap *);
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
-void ion_cma_heap_destroy(struct ion_heap *);
-
-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
-                                     unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-                      unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre allocated memory to use from your heap.  Keeping
- * a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant peformance benefit on
- * many systems */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count:                number of highmem items in the pool
- * @low_count:         number of lowmem items in the pool
- * @high_items:                list of highmem items
- * @low_items:         list of lowmem items
- * @shrinker:          a shrinker for the items
- * @mutex:             lock protecting this struct and especially the count
- *                     item list
- * @alloc:             function to be used to allocate pageory when the pool
- *                     is empty
- * @free:              function to be used to free pageory back to the system
- *                     when the shrinker fires
- * @gfp_mask:          gfp_mask to use from alloc
- * @order:             order of pages in the pool
- * @list:              plist node for list of pools
- *
- * Allows you to keep a pool of pre allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant peformance benefit
- * on many systems
- */
-struct ion_page_pool {
-       int high_count;
-       int low_count;
-       struct list_head high_items;
-       struct list_head low_items;
-       struct mutex mutex;
-       gfp_t gfp_mask;
-       unsigned int order;
-       struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
-void ion_page_pool_destroy(struct ion_page_pool *);
-void *ion_page_pool_alloc(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *);
-
-/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool:              the pool
- * @gfp_mask:          the memory type to reclaim
- * @nr_to_scan:                number of items to shrink in pages
- *
- * returns the number of items freed in pages
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-                         int nr_to_scan);
-
-#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
deleted file mode 100644 (file)
index 5fe81a7..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_heap.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/ion.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-                                           __GFP_NOWARN | __GFP_NORETRY) &
-                                          ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
-                                        __GFP_NOWARN);
-static const unsigned int orders[] = {8, 4, 0};
-static const int num_orders = ARRAY_SIZE(orders);
-static int order_to_index(unsigned int order)
-{
-       int i;
-       for (i = 0; i < num_orders; i++)
-               if (order == orders[i])
-                       return i;
-       BUG();
-       return -1;
-}
-
-static unsigned int order_to_size(int order)
-{
-       return PAGE_SIZE << order;
-}
-
-struct ion_system_heap {
-       struct ion_heap heap;
-       struct ion_page_pool **pools;
-};
-
-struct page_info {
-       struct page *page;
-       unsigned int order;
-       struct list_head list;
-};
-
-static struct page *alloc_buffer_page(struct ion_system_heap *heap,
-                                     struct ion_buffer *buffer,
-                                     unsigned long order)
-{
-       bool cached = ion_buffer_cached(buffer);
-       struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-       struct page *page;
-
-       if (!cached) {
-               page = ion_page_pool_alloc(pool);
-       } else {
-               gfp_t gfp_flags = low_order_gfp_flags;
-
-               if (order > 4)
-                       gfp_flags = high_order_gfp_flags;
-               page = ion_heap_alloc_pages(buffer, gfp_flags, order);
-               if (!page)
-                       return 0;
-               arm_dma_ops.sync_single_for_device(NULL,
-                       pfn_to_dma(NULL, page_to_pfn(page)),
-                       PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-       }
-       if (!page)
-               return 0;
-
-       return page;
-}
-
-static void free_buffer_page(struct ion_system_heap *heap,
-                            struct ion_buffer *buffer, struct page *page,
-                            unsigned int order)
-{
-       bool cached = ion_buffer_cached(buffer);
-       bool split_pages = ion_buffer_fault_user_mappings(buffer);
-       int i;
-
-       if (!cached) {
-               struct ion_page_pool *pool = heap->pools[order_to_index(order)];
-               ion_page_pool_free(pool, page);
-       } else if (split_pages) {
-               for (i = 0; i < (1 << order); i++)
-                       __free_page(page + i);
-       } else {
-               __free_pages(page, order);
-       }
-}
-
-
-static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
-                                                struct ion_buffer *buffer,
-                                                unsigned long size,
-                                                unsigned int max_order)
-{
-       struct page *page;
-       struct page_info *info;
-       int i;
-
-       for (i = 0; i < num_orders; i++) {
-               if (size < order_to_size(orders[i]))
-                       continue;
-               if (max_order < orders[i])
-                       continue;
-
-               page = alloc_buffer_page(heap, buffer, orders[i]);
-               if (!page)
-                       continue;
-
-               info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
-               info->page = page;
-               info->order = orders[i];
-               return info;
-       }
-       return NULL;
-}
-
-static int ion_system_heap_allocate(struct ion_heap *heap,
-                                    struct ion_buffer *buffer,
-                                    unsigned long size, unsigned long align,
-                                    unsigned long flags)
-{
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       struct sg_table *table;
-       struct scatterlist *sg;
-       int ret;
-       struct list_head pages;
-       struct page_info *info, *tmp_info;
-       int i = 0;
-       long size_remaining = PAGE_ALIGN(size);
-       unsigned int max_order = orders[0];
-
-       INIT_LIST_HEAD(&pages);
-       while (size_remaining > 0) {
-               info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
-               if (!info)
-                       goto err;
-               list_add_tail(&info->list, &pages);
-               size_remaining -= (1 << info->order) * PAGE_SIZE;
-               max_order = info->order;
-               i++;
-       }
-
-       table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               goto err;
-
-       ret = sg_alloc_table(table, i, GFP_KERNEL);
-       if (ret)
-               goto err1;
-
-       sg = table->sgl;
-       list_for_each_entry_safe(info, tmp_info, &pages, list) {
-               struct page *page = info->page;
-               sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
-               sg = sg_next(sg);
-               list_del(&info->list);
-               kfree(info);
-       }
-
-       buffer->priv_virt = table;
-       return 0;
-err1:
-       kfree(table);
-err:
-       list_for_each_entry(info, &pages, list) {
-               free_buffer_page(sys_heap, buffer, info->page, info->order);
-               kfree(info);
-       }
-       return -ENOMEM;
-}
-
-void ion_system_heap_free(struct ion_buffer *buffer)
-{
-       struct ion_heap *heap = buffer->heap;
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       struct sg_table *table = buffer->sg_table;
-       bool cached = ion_buffer_cached(buffer);
-       struct scatterlist *sg;
-       LIST_HEAD(pages);
-       int i;
-
-       /* uncached pages come from the page pools, zero them before returning
-          for security purposes (other allocations are zerod at alloc time */
-       if (!cached)
-               ion_heap_buffer_zero(buffer);
-
-       for_each_sg(table->sgl, sg, table->nents, i)
-               free_buffer_page(sys_heap, buffer, sg_page(sg),
-                               get_order(sg_dma_len(sg)));
-       sg_free_table(table);
-       kfree(table);
-}
-
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
-{
-       return buffer->priv_virt;
-}
-
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
-{
-       return;
-}
-
-static struct ion_heap_ops system_heap_ops = {
-       .allocate = ion_system_heap_allocate,
-       .free = ion_system_heap_free,
-       .map_dma = ion_system_heap_map_dma,
-       .unmap_dma = ion_system_heap_unmap_dma,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-       .map_user = ion_heap_map_user,
-};
-
-static int ion_system_heap_shrink(struct shrinker *shrinker,
-                                 struct shrink_control *sc) {
-
-       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
-                                            shrinker);
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int nr_total = 0;
-       int nr_freed = 0;
-       int i;
-
-       if (sc->nr_to_scan == 0)
-               goto end;
-
-       /* shrink the free list first, no point in zeroing the memory if
-          we're just going to reclaim it */
-       nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
-               PAGE_SIZE;
-
-       if (nr_freed >= sc->nr_to_scan)
-               goto end;
-
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-
-               nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
-                                                sc->nr_to_scan);
-               if (nr_freed >= sc->nr_to_scan)
-                       break;
-       }
-
-end:
-       /* total number of items is whatever the page pools are holding
-          plus whatever's in the freelist */
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-               nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
-       }
-       nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
-       return nr_total;
-
-}
-
-static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
-                                     void *unused)
-{
-
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int i;
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool = sys_heap->pools[i];
-               seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
-                          pool->high_count, pool->order,
-                          (1 << pool->order) * PAGE_SIZE * pool->high_count);
-               seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
-                          pool->low_count, pool->order,
-                          (1 << pool->order) * PAGE_SIZE * pool->low_count);
-       }
-       return 0;
-}
-
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
-{
-       struct ion_system_heap *heap;
-       int i;
-
-       heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
-       if (!heap)
-               return ERR_PTR(-ENOMEM);
-       heap->heap.ops = &system_heap_ops;
-       heap->heap.type = ION_HEAP_TYPE_SYSTEM;
-       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-       heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
-                             GFP_KERNEL);
-       if (!heap->pools)
-               goto err_alloc_pools;
-       for (i = 0; i < num_orders; i++) {
-               struct ion_page_pool *pool;
-               gfp_t gfp_flags = low_order_gfp_flags;
-
-               if (orders[i] > 4)
-                       gfp_flags = high_order_gfp_flags;
-               pool = ion_page_pool_create(gfp_flags, orders[i]);
-               if (!pool)
-                       goto err_create_pool;
-               heap->pools[i] = pool;
-       }
-
-       heap->heap.shrinker.shrink = ion_system_heap_shrink;
-       heap->heap.shrinker.seeks = DEFAULT_SEEKS;
-       heap->heap.shrinker.batch = 0;
-       register_shrinker(&heap->heap.shrinker);
-       heap->heap.debug_show = ion_system_heap_debug_show;
-       return &heap->heap;
-err_create_pool:
-       for (i = 0; i < num_orders; i++)
-               if (heap->pools[i])
-                       ion_page_pool_destroy(heap->pools[i]);
-       kfree(heap->pools);
-err_alloc_pools:
-       kfree(heap);
-       return ERR_PTR(-ENOMEM);
-}
-
-void ion_system_heap_destroy(struct ion_heap *heap)
-{
-       struct ion_system_heap *sys_heap = container_of(heap,
-                                                       struct ion_system_heap,
-                                                       heap);
-       int i;
-
-       for (i = 0; i < num_orders; i++)
-               ion_page_pool_destroy(sys_heap->pools[i]);
-       kfree(sys_heap->pools);
-       kfree(sys_heap);
-}
-
-static int ion_system_contig_heap_allocate(struct ion_heap *heap,
-                                          struct ion_buffer *buffer,
-                                          unsigned long len,
-                                          unsigned long align,
-                                          unsigned long flags)
-{
-       buffer->priv_virt = kzalloc(len, GFP_KERNEL);
-       if (!buffer->priv_virt)
-               return -ENOMEM;
-       return 0;
-}
-
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
-{
-       kfree(buffer->priv_virt);
-}
-
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
-                                      struct ion_buffer *buffer,
-                                      ion_phys_addr_t *addr, size_t *len)
-{
-       *addr = virt_to_phys(buffer->priv_virt);
-       *len = buffer->size;
-       return 0;
-}
-
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-                                               struct ion_buffer *buffer)
-{
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-                                     struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
-}
-
-int ion_system_contig_heap_map_user(struct ion_heap *heap,
-                                   struct ion_buffer *buffer,
-                                   struct vm_area_struct *vma)
-{
-       unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
-       return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-
-}
-
-static struct ion_heap_ops kmalloc_ops = {
-       .allocate = ion_system_contig_heap_allocate,
-       .free = ion_system_contig_heap_free,
-       .phys = ion_system_contig_heap_phys,
-       .map_dma = ion_system_contig_heap_map_dma,
-       .unmap_dma = ion_system_contig_heap_unmap_dma,
-       .map_kernel = ion_heap_map_kernel,
-       .unmap_kernel = ion_heap_unmap_kernel,
-       .map_user = ion_system_contig_heap_map_user,
-};
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
-{
-       struct ion_heap *heap;
-
-       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
-       if (!heap)
-               return ERR_PTR(-ENOMEM);
-       heap->ops = &kmalloc_ops;
-       heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
-       return heap;
-}
-
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
-{
-       kfree(heap);
-}
-
diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c
deleted file mode 100644 (file)
index 692458e..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * drivers/gpu/ion/ion_system_mapper.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/memory.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include "ion_priv.h"
-/*
- * This mapper is valid for any heap that allocates memory that already has
- * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory,
- * pages obtained via io_remap, etc.
- */
-static void *ion_kernel_mapper_map(struct ion_mapper *mapper,
-                                  struct ion_buffer *buffer,
-                                  struct ion_mapping **mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
-               pr_err("%s: attempting to map an unsupported heap\n", __func__);
-               return ERR_PTR(-EINVAL);
-       }
-       /* XXX REVISIT ME!!! */
-       *((unsigned long *)mapping) = (unsigned long)buffer->priv;
-       return buffer->priv;
-}
-
-static void ion_kernel_mapper_unmap(struct ion_mapper *mapper,
-                                   struct ion_buffer *buffer,
-                                   struct ion_mapping *mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask))
-               pr_err("%s: attempting to unmap an unsupported heap\n",
-                      __func__);
-}
-
-static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper,
-                                       struct ion_buffer *buffer,
-                                       struct ion_mapping *mapping)
-{
-       if (!((1 << buffer->heap->type) & mapper->heap_mask)) {
-               pr_err("%s: attempting to unmap an unsupported heap\n",
-                      __func__);
-               return ERR_PTR(-EINVAL);
-       }
-       return buffer->priv;
-}
-
-static int ion_kernel_mapper_map_user(struct ion_mapper *mapper,
-                                     struct ion_buffer *buffer,
-                                     struct vm_area_struct *vma,
-                                     struct ion_mapping *mapping)
-{
-       int ret;
-
-       switch (buffer->heap->type) {
-       case ION_HEAP_KMALLOC:
-       {
-               unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv));
-               ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
-                                     vma->vm_end - vma->vm_start,
-                                     vma->vm_page_prot);
-               break;
-       }
-       case ION_HEAP_VMALLOC:
-               ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff);
-               break;
-       default:
-               pr_err("%s: attempting to map unsupported heap to userspace\n",
-                      __func__);
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct ion_mapper_ops ops = {
-       .map = ion_kernel_mapper_map,
-       .map_kernel = ion_kernel_mapper_map_kernel,
-       .map_user = ion_kernel_mapper_map_user,
-       .unmap = ion_kernel_mapper_unmap,
-};
-
-struct ion_mapper *ion_system_mapper_create(void)
-{
-       struct ion_mapper *mapper;
-       mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL);
-       if (!mapper)
-               return ERR_PTR(-ENOMEM);
-       mapper->type = ION_SYSTEM_MAPPER;
-       mapper->ops = &ops;
-       mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC);
-       return mapper;
-}
-
-void ion_system_mapper_destroy(struct ion_mapper *mapper)
-{
-       kfree(mapper);
-}
-
diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile
deleted file mode 100644 (file)
index 11cd003..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-y += tegra_ion.o
diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c
deleted file mode 100644 (file)
index 7af6e16..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * drivers/gpu/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/ion.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion_priv.h"
-
-struct ion_device *idev;
-struct ion_mapper *tegra_user_mapper;
-int num_heaps;
-struct ion_heap **heaps;
-
-int tegra_ion_probe(struct platform_device *pdev)
-{
-       struct ion_platform_data *pdata = pdev->dev.platform_data;
-       int err;
-       int i;
-
-       num_heaps = pdata->nr;
-
-       heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
-
-       idev = ion_device_create(NULL);
-       if (IS_ERR_OR_NULL(idev)) {
-               kfree(heaps);
-               return PTR_ERR(idev);
-       }
-
-       /* create the heaps as specified in the board file */
-       for (i = 0; i < num_heaps; i++) {
-               struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
-               heaps[i] = ion_heap_create(heap_data);
-               if (IS_ERR_OR_NULL(heaps[i])) {
-                       err = PTR_ERR(heaps[i]);
-                       goto err;
-               }
-               ion_device_add_heap(idev, heaps[i]);
-       }
-       platform_set_drvdata(pdev, idev);
-       return 0;
-err:
-       for (i = 0; i < num_heaps; i++) {
-               if (heaps[i])
-                       ion_heap_destroy(heaps[i]);
-       }
-       kfree(heaps);
-       return err;
-}
-
-int tegra_ion_remove(struct platform_device *pdev)
-{
-       struct ion_device *idev = platform_get_drvdata(pdev);
-       int i;
-
-       ion_device_destroy(idev);
-       for (i = 0; i < num_heaps; i++)
-               ion_heap_destroy(heaps[i]);
-       kfree(heaps);
-       return 0;
-}
-
-static struct platform_driver ion_driver = {
-       .probe = tegra_ion_probe,
-       .remove = tegra_ion_remove,
-       .driver = { .name = "ion-tegra" }
-};
-
-static int __init ion_init(void)
-{
-       return platform_driver_register(&ion_driver);
-}
-
-static void __exit ion_exit(void)
-{
-       platform_driver_unregister(&ion_driver);
-}
-
-module_init(ion_init);
-module_exit(ion_exit);
-
index fe11eb6fa966ecb7fc39ac3288a51412c35631e6..6da535db25388c94e70d551aacd04b47b3920d55 100644 (file)
@@ -108,6 +108,8 @@ config SW_SYNC_USER
          *WARNING* improper use of this can result in deadlocking kernel
          drivers from userspace.
 
+source "drivers/staging/android/ion/Kconfig"
+
 endif # if ANDROID
 
 endmenu
index c136299e05afd6f5657061e3577dc46e549e6d6b..0a01e1914905fc2e2d9b793706b665c2dbc10509 100644 (file)
@@ -1,5 +1,7 @@
 ccflags-y += -I$(src)                  # needed for trace events
 
+obj-y                                  += ion/
+
 obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)           += logger.o
index 4fd32f337f9cbb878419794e966fe8b47851a75a..495b20cf3bf67e18e0ebf49ff6322a0414d08869 100644 (file)
 #ifndef _LINUX_ANDROID_ALARM_H
 #define _LINUX_ANDROID_ALARM_H
 
-#include <linux/ioctl.h>
-#include <linux/time.h>
 #include <linux/compat.h>
+#include <linux/ioctl.h>
 
-enum android_alarm_type {
-       /* return code bit numbers or set alarm arg */
-       ANDROID_ALARM_RTC_WAKEUP,
-       ANDROID_ALARM_RTC,
-       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-       ANDROID_ALARM_ELAPSED_REALTIME,
-       ANDROID_ALARM_SYSTEMTIME,
-
-       ANDROID_ALARM_TYPE_COUNT,
-
-       /* return code bit numbers */
-       /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
-       ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
-       ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
-       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
-                               1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
-       ANDROID_ALARM_ELAPSED_REALTIME_MASK =
-                               1U << ANDROID_ALARM_ELAPSED_REALTIME,
-       ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
-       ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT                  _IO('a', 1)
-
-#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
 
 #ifdef CONFIG_COMPAT
 #define ANDROID_ALARM_SET_COMPAT(type)         ALARM_IOW(2, type, \
index 8dc0f0d3adf310b651cf6b17742795ae5269600d..5abcfd7aa706c37ac6c214323429bfbe6dfeff51 100644 (file)
 #include <linux/ioctl.h>
 #include <linux/compat.h>
 
-#define ASHMEM_NAME_LEN                256
-
-#define ASHMEM_NAME_DEF                "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED      0
-#define ASHMEM_WAS_PURGED      1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED     0
-#define ASHMEM_IS_PINNED       1
-
-struct ashmem_pin {
-       __u32 offset;   /* offset into region, in bytes, page-aligned */
-       __u32 len;      /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC            0x77
-
-#define ASHMEM_SET_NAME                _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME                _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE                _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE                _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK   _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK   _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN             _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN           _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS  _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES        _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
 
 /* support of 32bit userspace on 64bit platforms */
 #ifdef CONFIG_COMPAT
index dbe81ceca1bdd3615373f44c25267cd385646946..d4101a671718ef2c8428c18074dec6fd13f94c65 100644 (file)
 #ifndef _LINUX_BINDER_H
 #define _LINUX_BINDER_H
 
-#include <linux/ioctl.h>
-
-#define B_PACK_CHARS(c1, c2, c3, c4) \
-       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
-       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
-       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
-       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes.  The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur.  The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
-       /* 8 bytes for large_flat_header. */
-       unsigned long           type;
-       unsigned long           flags;
-
-       /* 8 bytes of data. */
-       union {
-               void __user     *binder;        /* local object */
-               signed long     handle;         /* remote object */
-       };
-
-       /* extra data associated with local object */
-       void __user             *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
-       signed long     write_size;     /* bytes to write */
-       signed long     write_consumed; /* bytes consumed by driver */
-       unsigned long   write_buffer;
-       signed long     read_size;      /* bytes to read */
-       signed long     read_consumed;  /* bytes consumed by driver */
-       unsigned long   read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
-       /* driver protocol version -- increment with incompatible change */
-       signed long     protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
-#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
-#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, size_t)
-#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
-#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
-#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
-#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted.  This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process.  That is, the process is being destroyed.
- * You should handle this by exiting from your process.  Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
-       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
-       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
-       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
-       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
-       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
-        * identifying the target and contents of the transaction.
-        */
-       union {
-               size_t  handle; /* target descriptor of command transaction */
-               void    *ptr;   /* target descriptor of return transaction */
-       } target;
-       void            *cookie;        /* target object cookie */
-       unsigned int    code;           /* transaction command */
-
-       /* General information about the transaction. */
-       unsigned int    flags;
-       pid_t           sender_pid;
-       uid_t           sender_euid;
-       size_t          data_size;      /* number of bytes of data */
-       size_t          offsets_size;   /* number of bytes of offsets */
-
-       /* If this transaction is inline, the data immediately
-        * follows here; otherwise, it ends with a pointer to
-        * the data buffer.
-        */
-       union {
-               struct {
-                       /* transaction data */
-                       const void __user       *buffer;
-                       /* offsets from buffer to flat_binder_object structs */
-                       const void __user       *offsets;
-               } ptr;
-               uint8_t buf[8];
-       } data;
-};
-
-struct binder_ptr_cookie {
-       void *ptr;
-       void *cookie;
-};
-
-struct binder_pri_desc {
-       int priority;
-       int desc;
-};
-
-struct binder_pri_ptr_cookie {
-       int priority;
-       void *ptr;
-       void *cookie;
-};
-
-enum binder_driver_return_protocol {
-       BR_ERROR = _IOR('r', 0, int),
-       /*
-        * int: error code
-        */
-
-       BR_OK = _IO('r', 1),
-       /* No parameters! */
-
-       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
-       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the received command.
-        */
-
-       BR_ACQUIRE_RESULT = _IOR('r', 4, int),
-       /*
-        * not currently supported
-        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
-        * Else the remote object has acquired a primary reference.
-        */
-
-       BR_DEAD_REPLY = _IO('r', 5),
-       /*
-        * The target of the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
-        */
-
-       BR_TRANSACTION_COMPLETE = _IO('r', 6),
-       /*
-        * No parameters... always refers to the last transaction requested
-        * (including replies).  Note that this will be sent even for
-        * asynchronous transactions.
-        */
-
-       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
-       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
-       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
-       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
-       /*
-        * void *:      ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
-       /*
-        * not currently supported
-        * int: priority
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_NOOP = _IO('r', 12),
-       /*
-        * No parameters.  Do nothing and examine the next command.  It exists
-        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
-        */
-
-       BR_SPAWN_LOOPER = _IO('r', 13),
-       /*
-        * No parameters.  The driver has determined that a process has no
-        * threads waiting to service incoming transactions.  When a process
-        * receives this command, it must spawn a new service thread and
-        * register it via bcENTER_LOOPER.
-        */
-
-       BR_FINISHED = _IO('r', 14),
-       /*
-        * not currently supported
-        * stop threadpool thread
-        */
-
-       BR_DEAD_BINDER = _IOR('r', 15, void *),
-       /*
-        * void *: cookie
-        */
-       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
-       /*
-        * void *: cookie
-        */
-
-       BR_FAILED_REPLY = _IO('r', 17),
-       /*
-        * The the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
-        */
-};
-
-enum binder_driver_command_protocol {
-       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
-       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the sent command.
-        */
-
-       BC_ACQUIRE_RESULT = _IOW('c', 2, int),
-       /*
-        * not currently supported
-        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
-        * Else you have acquired a primary reference on the object.
-        */
-
-       BC_FREE_BUFFER = _IOW('c', 3, int),
-       /*
-        * void *: ptr to transaction data received on a read
-        */
-
-       BC_INCREFS = _IOW('c', 4, int),
-       BC_ACQUIRE = _IOW('c', 5, int),
-       BC_RELEASE = _IOW('c', 6, int),
-       BC_DECREFS = _IOW('c', 7, int),
-       /*
-        * int: descriptor
-        */
-
-       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
-       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
-       /*
-        * not currently supported
-        * int: priority
-        * int: descriptor
-        */
-
-       BC_REGISTER_LOOPER = _IO('c', 11),
-       /*
-        * No parameters.
-        * Register a spawned looper thread with the device.
-        */
-
-       BC_ENTER_LOOPER = _IO('c', 12),
-       BC_EXIT_LOOPER = _IO('c', 13),
-       /*
-        * No parameters.
-        * These two commands are sent as an application-level thread
-        * enters and exits the binder loop, respectively.  They are
-        * used so the binder can have an accurate count of the number
-        * of looping threads it has available.
-        */
-
-       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie
-        */
-
-       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie
-        */
-
-       BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
-       /*
-        * void *: cookie
-        */
-};
+#include "uapi/binder.h"
 
 #endif /* _LINUX_BINDER_H */
 
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644 (file)
index 0000000..a342d96
--- /dev/null
@@ -0,0 +1,24 @@
+menuconfig ION
+       tristate "Ion Memory Manager"
+       select GENERIC_ALLOCATOR
+       select DMA_SHARED_BUFFER
+       ---help---
+         Chose this option to enable the ION Memory Manager,
+         used by Android to efficiently allocate buffers
+         from userspace that can be shared between drivers.
+         If you're not using Android its probably safe to
+         say N here.
+
+config ION_TEST
+       tristate "Ion Test Device"
+       depends on ION
+       help
+         Choose this option to create a device that can be used to test the
+         kernel and device side ION functions.
+
+config ION_TEGRA
+       tristate "Ion for Tegra"
+       depends on ARCH_TEGRA && ION
+       help
+         Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644 (file)
index 0000000..75039b9
--- /dev/null
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+                       ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644 (file)
index 0000000..e9a8132
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * drivers/gpu/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+       compat_size_t len;
+       compat_size_t align;
+       compat_uint_t heap_id_mask;
+       compat_uint_t flags;
+       compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+       compat_uint_t cmd;
+       compat_ulong_t arg;
+};
+
+#define COMPAT_ION_IOC_ALLOC   _IOWR(ION_IOC_MAGIC, 0, \
+                                     struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE    _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM  _IOWR(ION_IOC_MAGIC, 6, \
+                                     struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+                       struct compat_ion_allocation_data __user *data32,
+                       struct ion_allocation_data __user *data)
+{
+       compat_size_t s;
+       compat_uint_t u;
+       compat_int_t i;
+       int err;
+
+       err = get_user(s, &data32->len);
+       err |= put_user(s, &data->len);
+       err |= get_user(s, &data32->align);
+       err |= put_user(s, &data->align);
+       err |= get_user(u, &data32->heap_id_mask);
+       err |= put_user(u, &data->heap_id_mask);
+       err |= get_user(u, &data32->flags);
+       err |= put_user(u, &data->flags);
+       err |= get_user(i, &data32->handle);
+       err |= put_user(i, &data->handle);
+
+       return err;
+}
+
+static int compat_put_ion_allocation_data(
+                       struct compat_ion_allocation_data __user *data32,
+                       struct ion_allocation_data __user *data)
+{
+       compat_size_t s;
+       compat_uint_t u;
+       compat_int_t i;
+       int err;
+
+       err = get_user(s, &data->len);
+       err |= put_user(s, &data32->len);
+       err |= get_user(s, &data->align);
+       err |= put_user(s, &data32->align);
+       err |= get_user(u, &data->heap_id_mask);
+       err |= put_user(u, &data32->heap_id_mask);
+       err |= get_user(u, &data->flags);
+       err |= put_user(u, &data32->flags);
+       err |= get_user(i, &data->handle);
+       err |= put_user(i, &data32->handle);
+
+       return err;
+}
+
+static int compat_get_ion_custom_data(
+                       struct compat_ion_custom_data __user *data32,
+                       struct ion_custom_data __user *data)
+{
+       compat_uint_t cmd;
+       compat_ulong_t arg;
+       int err;
+
+       err = get_user(cmd, &data32->cmd);
+       err |= put_user(cmd, &data->cmd);
+       err |= get_user(arg, &data32->arg);
+       err |= put_user(arg, &data->arg);
+
+       return err;
+};
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       long ret;
+
+       if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+               return -ENOTTY;
+
+       switch (cmd) {
+       case COMPAT_ION_IOC_ALLOC:
+       {
+               struct compat_ion_allocation_data __user *data32;
+               struct ion_allocation_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_allocation_data(data32, data);
+               if (err)
+                       return err;
+               ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+                                                       (unsigned long)data);
+               err = compat_put_ion_allocation_data(data32, data);
+               return ret ? ret : err;
+       }
+       case COMPAT_ION_IOC_FREE:
+       {
+               struct compat_ion_allocation_data __user *data32;
+               struct ion_allocation_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_allocation_data(data32, data);
+               if (err)
+                       return err;
+
+               return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+                                                       (unsigned long)data);
+       }
+       case COMPAT_ION_IOC_CUSTOM: {
+               struct compat_ion_custom_data __user *data32;
+               struct ion_custom_data __user *data;
+               int err;
+
+               data32 = compat_ptr(arg);
+               data = compat_alloc_user_space(sizeof(*data));
+               if (data == NULL)
+                       return -EFAULT;
+
+               err = compat_get_ion_custom_data(data32, data);
+               if (err)
+                       return err;
+
+               return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+                                                       (unsigned long)data);
+       }
+       case ION_IOC_SHARE:
+       case ION_IOC_MAP:
+       case ION_IOC_IMPORT:
+       case ION_IOC_SYNC:
+               return filp->f_op->unlocked_ioctl(filp, cmd,
+                                               (unsigned long)compat_ptr(arg));
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644 (file)
index 0000000..3a9c8c0
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+
+ * drivers/gpu/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644 (file)
index 0000000..7522b0b
--- /dev/null
@@ -0,0 +1,1549 @@
+/*
+
+ * drivers/gpu/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:               the actual misc device
+ * @buffers:           an rb tree of all the existing buffers
+ * @buffer_lock:       lock protecting the tree of buffers
+ * @lock:              rwsem protecting the tree of heaps and clients
+ * @heaps:             list of all the heaps in the system
+ * @user_clients:      list of all the clients created from userspace
+ */
+struct ion_device {
+       struct miscdevice dev;
+       struct rb_root buffers;
+       struct mutex buffer_lock;
+       struct rw_semaphore lock;
+       struct plist_head heaps;
+       long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+                             unsigned long arg);
+       struct rb_root clients;
+       struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:              node in the tree of all clients
+ * @dev:               backpointer to ion device
+ * @handles:           an rb tree of all the handles in this client
+ * @idr:               an idr space for allocating handle ids
+ * @lock:              lock protecting the tree of handles
+ * @name:              used for debugging
+ * @task:              used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+       struct rb_node node;
+       struct ion_device *dev;
+       struct rb_root handles;
+       struct idr idr;
+       struct mutex lock;
+       const char *name;
+       struct task_struct *task;
+       pid_t pid;
+       struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:               reference count
+ * @client:            back pointer to the client the buffer resides in
+ * @buffer:            pointer to the buffer
+ * @node:              node in the client's handle rbtree
+ * @kmap_cnt:          count of times this client has mapped to kernel
+ * @id:                        client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+       struct kref ref;
+       struct ion_client *client;
+       struct ion_buffer *buffer;
+       struct rb_node node;
+       unsigned int kmap_cnt;
+       int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+       return (buffer->flags & ION_FLAG_CACHED) &&
+               !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+       return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+       return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+       return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+                          struct ion_buffer *buffer)
+{
+       struct rb_node **p = &dev->buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_buffer *entry;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_buffer, node);
+
+               if (buffer < entry) {
+                       p = &(*p)->rb_left;
+               } else if (buffer > entry) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_err("%s: buffer already found.", __func__);
+                       BUG();
+               }
+       }
+
+       rb_link_node(&buffer->node, parent, p);
+       rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+                                    struct ion_device *dev,
+                                    unsigned long len,
+                                    unsigned long align,
+                                    unsigned long flags)
+{
+       struct ion_buffer *buffer;
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int i, ret;
+
+       buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
+
+       buffer->heap = heap;
+       buffer->flags = flags;
+       kref_init(&buffer->ref);
+
+       ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+       if (ret) {
+               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+                       goto err2;
+
+               ion_heap_freelist_drain(heap, 0);
+               ret = heap->ops->allocate(heap, buffer, len, align,
+                                         flags);
+               if (ret)
+                       goto err2;
+       }
+
+       buffer->dev = dev;
+       buffer->size = len;
+
+       table = heap->ops->map_dma(heap, buffer);
+       if (WARN_ONCE(table == NULL,
+                       "heap->ops->map_dma should return ERR_PTR on error"))
+               table = ERR_PTR(-EINVAL);
+       if (IS_ERR(table)) {
+               heap->ops->free(buffer);
+               kfree(buffer);
+               return ERR_PTR(PTR_ERR(table));
+       }
+       buffer->sg_table = table;
+       if (ion_buffer_fault_user_mappings(buffer)) {
+               int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+               struct scatterlist *sg;
+               int i, j, k = 0;
+
+               buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+               if (!buffer->pages) {
+                       ret = -ENOMEM;
+                       goto err1;
+               }
+
+               for_each_sg(table->sgl, sg, table->nents, i) {
+                       struct page *page = sg_page(sg);
+
+                       for (j = 0; j < sg->length / PAGE_SIZE; j++)
+                               buffer->pages[k++] = page++;
+               }
+
+               if (ret)
+                       goto err;
+       }
+
+       buffer->dev = dev;
+       buffer->size = len;
+       INIT_LIST_HEAD(&buffer->vmas);
+       mutex_init(&buffer->lock);
+       /* this will set up dma addresses for the sglist -- it is not
+          technically correct as per the dma api -- a specific
+          device isn't really taking ownership here.  However, in practice on
+          our systems the only dma_address space is physical addresses.
+          Additionally, we can't afford the overhead of invalidating every
+          allocation via dma_map_sg. The implicit contract here is that
+          memory coming from the heaps is ready for dma, ie if it has a
+          cached mapping that mapping has been invalidated */
+       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+               sg_dma_address(sg) = sg_phys(sg);
+       mutex_lock(&dev->buffer_lock);
+       ion_buffer_add(dev, buffer);
+       mutex_unlock(&dev->buffer_lock);
+       return buffer;
+
+err:
+       heap->ops->unmap_dma(heap, buffer);
+       heap->ops->free(buffer);
+err1:
+       if (buffer->pages)
+               vfree(buffer->pages);
+err2:
+       kfree(buffer);
+       return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+       if (WARN_ON(buffer->kmap_cnt > 0))
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+       buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+       buffer->heap->ops->free(buffer);
+       if (buffer->pages)
+               vfree(buffer->pages);
+       kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+       struct ion_heap *heap = buffer->heap;
+       struct ion_device *dev = buffer->dev;
+
+       mutex_lock(&dev->buffer_lock);
+       rb_erase(&buffer->node, &dev->buffers);
+       mutex_unlock(&dev->buffer_lock);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_freelist_add(heap, buffer);
+       else
+               ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+       kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+       return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+       mutex_lock(&buffer->lock);
+       buffer->handle_count++;
+       mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+       /*
+        * when a buffer is removed from a handle, if it is not in
+        * any other handles, copy the taskcomm and the pid of the
+        * process it's being removed from into the buffer.  At this
+        * point there will be no way to track what processes this buffer is
+        * being used by, it only exists as a dma_buf file descriptor.
+        * The taskcomm and pid can provide a debug hint as to where this fd
+        * is in the system
+        */
+       mutex_lock(&buffer->lock);
+       buffer->handle_count--;
+       BUG_ON(buffer->handle_count < 0);
+       if (!buffer->handle_count) {
+               struct task_struct *task;
+
+               task = current->group_leader;
+               get_task_comm(buffer->task_comm, task);
+               buffer->pid = task_pid_nr(task);
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+                                    struct ion_buffer *buffer)
+{
+       struct ion_handle *handle;
+
+       handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+       if (!handle)
+               return ERR_PTR(-ENOMEM);
+       kref_init(&handle->ref);
+       RB_CLEAR_NODE(&handle->node);
+       handle->client = client;
+       ion_buffer_get(buffer);
+       ion_buffer_add_to_handle(buffer);
+       handle->buffer = buffer;
+
+       return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+       struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+       struct ion_client *client = handle->client;
+       struct ion_buffer *buffer = handle->buffer;
+
+       mutex_lock(&buffer->lock);
+       while (handle->kmap_cnt)
+               ion_handle_kmap_put(handle);
+       mutex_unlock(&buffer->lock);
+
+       idr_remove(&client->idr, handle->id);
+       if (!RB_EMPTY_NODE(&handle->node))
+               rb_erase(&handle->node, &client->handles);
+
+       ion_buffer_remove_from_handle(buffer);
+       ion_buffer_put(buffer);
+
+       kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+       return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+       kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+       struct ion_client *client = handle->client;
+       int ret;
+
+       mutex_lock(&client->lock);
+       ret = kref_put(&handle->ref, ion_handle_destroy);
+       mutex_unlock(&client->lock);
+
+       return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+                                           struct ion_buffer *buffer)
+{
+       struct rb_node *n = client->handles.rb_node;
+
+       while (n) {
+               struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+               if (buffer < entry->buffer)
+                       n = n->rb_left;
+               else if (buffer > entry->buffer)
+                       n = n->rb_right;
+               else
+                       return entry;
+       }
+       return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+                                               int id)
+{
+       struct ion_handle *handle;
+
+       mutex_lock(&client->lock);
+       handle = idr_find(&client->idr, id);
+       if (handle)
+               ion_handle_get(handle);
+       mutex_unlock(&client->lock);
+
+       return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+                               struct ion_handle *handle)
+{
+       WARN_ON(!mutex_is_locked(&client->lock));
+       return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+       int id;
+       struct rb_node **p = &client->handles.rb_node;
+       struct rb_node *parent = NULL;
+       struct ion_handle *entry;
+
+       id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       handle->id = id;
+
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_handle, node);
+
+               if (handle->buffer < entry->buffer)
+                       p = &(*p)->rb_left;
+               else if (handle->buffer > entry->buffer)
+                       p = &(*p)->rb_right;
+               else
+                       WARN(1, "%s: buffer already found.", __func__);
+       }
+
+       rb_link_node(&handle->node, parent, p);
+       rb_insert_color(&handle->node, &client->handles);
+
+       return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int heap_id_mask,
+                            unsigned int flags)
+{
+       struct ion_handle *handle;
+       struct ion_device *dev = client->dev;
+       struct ion_buffer *buffer = NULL;
+       struct ion_heap *heap;
+       int ret;
+
+       pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+                len, align, heap_id_mask, flags);
+       /*
+        * traverse the list of heaps available in this system in priority
+        * order.  If the heap type is supported by the client, and matches the
+        * request of the caller allocate from it.  Repeat until allocate has
+        * succeeded or all heaps have been tried
+        */
+       len = PAGE_ALIGN(len);
+
+       if (!len)
+               return ERR_PTR(-EINVAL);
+
+       down_read(&dev->lock);
+       plist_for_each_entry(heap, &dev->heaps, node) {
+               /* if the caller didn't specify this heap id */
+               if (!((1 << heap->id) & heap_id_mask))
+                       continue;
+               buffer = ion_buffer_create(heap, dev, len, align, flags);
+               if (!IS_ERR(buffer))
+                       break;
+       }
+       up_read(&dev->lock);
+
+       if (buffer == NULL)
+               return ERR_PTR(-ENODEV);
+
+       if (IS_ERR(buffer))
+               return ERR_PTR(PTR_ERR(buffer));
+
+       handle = ion_handle_create(client, buffer);
+
+       /*
+        * ion_buffer_create will create a buffer with a ref_cnt of 1,
+        * and ion_handle_create will take a second reference, drop one here
+        */
+       ion_buffer_put(buffer);
+
+       if (IS_ERR(handle))
+               return handle;
+
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
+
+       return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+       bool valid_handle;
+
+       BUG_ON(client != handle->client);
+
+       mutex_lock(&client->lock);
+       valid_handle = ion_handle_validate(client, handle);
+
+       if (!valid_handle) {
+               WARN(1, "%s: invalid handle passed to free.\n", __func__);
+               mutex_unlock(&client->lock);
+               return;
+       }
+       mutex_unlock(&client->lock);
+       ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len)
+{
+       struct ion_buffer *buffer;
+       int ret;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               mutex_unlock(&client->lock);
+               return -EINVAL;
+       }
+
+       buffer = handle->buffer;
+
+       if (!buffer->heap->ops->phys) {
+               pr_err("%s: ion_phys is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return -ENODEV;
+       }
+       mutex_unlock(&client->lock);
+       ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+       return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+       void *vaddr;
+
+       if (buffer->kmap_cnt) {
+               buffer->kmap_cnt++;
+               return buffer->vaddr;
+       }
+       vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+       if (WARN_ONCE(vaddr == NULL,
+                       "heap->ops->map_kernel should return ERR_PTR on error"))
+               return ERR_PTR(-EINVAL);
+       if (IS_ERR(vaddr))
+               return vaddr;
+       buffer->vaddr = vaddr;
+       buffer->kmap_cnt++;
+       return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+       struct ion_buffer *buffer = handle->buffer;
+       void *vaddr;
+
+       if (handle->kmap_cnt) {
+               handle->kmap_cnt++;
+               return buffer->vaddr;
+       }
+       vaddr = ion_buffer_kmap_get(buffer);
+       if (IS_ERR(vaddr))
+               return vaddr;
+       handle->kmap_cnt++;
+       return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+       buffer->kmap_cnt--;
+       if (!buffer->kmap_cnt) {
+               buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+               buffer->vaddr = NULL;
+       }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+       struct ion_buffer *buffer = handle->buffer;
+
+       handle->kmap_cnt--;
+       if (!handle->kmap_cnt)
+               ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       void *vaddr;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_kernel.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+
+       buffer = handle->buffer;
+
+       if (!handle->buffer->heap->ops->map_kernel) {
+               pr_err("%s: map_kernel is not implemented by this heap.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_lock(&buffer->lock);
+       vaddr = ion_handle_kmap_get(handle);
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+       return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+
+       mutex_lock(&client->lock);
+       buffer = handle->buffer;
+       mutex_lock(&buffer->lock);
+       ion_handle_kmap_put(handle);
+       mutex_unlock(&buffer->lock);
+       mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+       struct ion_client *client = s->private;
+       struct rb_node *n;
+       size_t sizes[ION_NUM_HEAP_IDS] = {0};
+       const char *names[ION_NUM_HEAP_IDS] = {NULL};
+       int i;
+
+       mutex_lock(&client->lock);
+       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               unsigned int id = handle->buffer->heap->id;
+
+               if (!names[id])
+                       names[id] = handle->buffer->heap->name;
+               sizes[id] += handle->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+
+       seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+       for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+               if (!names[i])
+                       continue;
+               seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+       }
+       return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+       .open = ion_debug_client_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+                                    const char *name)
+{
+       struct ion_client *client;
+       struct task_struct *task;
+       struct rb_node **p;
+       struct rb_node *parent = NULL;
+       struct ion_client *entry;
+       char debug_name[64];
+       pid_t pid;
+
+       get_task_struct(current->group_leader);
+       task_lock(current->group_leader);
+       pid = task_pid_nr(current->group_leader);
+       /* don't bother to store task struct for kernel threads,
+          they can't be killed anyway */
+       if (current->group_leader->flags & PF_KTHREAD) {
+               put_task_struct(current->group_leader);
+               task = NULL;
+       } else {
+               task = current->group_leader;
+       }
+       task_unlock(current->group_leader);
+
+       client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+       if (!client) {
+               if (task)
+                       put_task_struct(current->group_leader);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       client->dev = dev;
+       client->handles = RB_ROOT;
+       idr_init(&client->idr);
+       mutex_init(&client->lock);
+       client->name = name;
+       client->task = task;
+       client->pid = pid;
+
+       down_write(&dev->lock);
+       p = &dev->clients.rb_node;
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct ion_client, node);
+
+               if (client < entry)
+                       p = &(*p)->rb_left;
+               else if (client > entry)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&client->node, parent, p);
+       rb_insert_color(&client->node, &dev->clients);
+
+       snprintf(debug_name, 64, "%u", client->pid);
+       client->debug_root = debugfs_create_file(debug_name, 0664,
+                                                dev->debug_root, client,
+                                                &debug_client_fops);
+       up_write(&dev->lock);
+
+       return client;
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+       struct ion_device *dev = client->dev;
+       struct rb_node *n;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       while ((n = rb_first(&client->handles))) {
+               struct ion_handle *handle = rb_entry(n, struct ion_handle,
+                                                    node);
+               ion_handle_destroy(&handle->ref);
+       }
+
+       idr_destroy(&client->idr);
+
+       down_write(&dev->lock);
+       if (client->task)
+               put_task_struct(client->task);
+       rb_erase(&client->node, &dev->clients);
+       debugfs_remove_recursive(client->debug_root);
+       up_write(&dev->lock);
+
+       kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+                             struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       struct sg_table *table;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               pr_err("%s: invalid handle passed to map_dma.\n",
+                      __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = handle->buffer;
+       table = buffer->sg_table;
+       mutex_unlock(&client->lock);
+       return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+                                      struct device *dev,
+                                      enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+                                       enum dma_data_direction direction)
+{
+       struct dma_buf *dmabuf = attachment->dmabuf;
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+       return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                             struct sg_table *table,
+                             enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir)
+{
+       struct scatterlist sg;
+
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       /*
+        * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+        * for the targeted device, but this works on the currently targeted
+        * hardware.
+        */
+       sg_dma_address(&sg) = page_to_phys(page);
+       dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+       struct list_head list;
+       struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+                                      struct device *dev,
+                                      enum dma_data_direction dir)
+{
+       struct ion_vma_list *vma_list;
+       int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+       int i;
+
+       pr_debug("%s: syncing for device %s\n", __func__,
+                dev ? dev_name(dev) : "null");
+
+       if (!ion_buffer_fault_user_mappings(buffer))
+               return;
+
+       mutex_lock(&buffer->lock);
+       for (i = 0; i < pages; i++) {
+               struct page *page = buffer->pages[i];
+
+               if (ion_buffer_page_is_dirty(page))
+                       ion_pages_sync_for_device(dev, ion_buffer_page(page),
+                                                       PAGE_SIZE, dir);
+
+               ion_buffer_page_clean(buffer->pages + i);
+       }
+       list_for_each_entry(vma_list, &buffer->vmas, list) {
+               struct vm_area_struct *vma = vma_list->vma;
+
+               zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+                              NULL);
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       unsigned long pfn;
+       int ret;
+
+       mutex_lock(&buffer->lock);
+       ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+       BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+       pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       mutex_unlock(&buffer->lock);
+       if (ret)
+               return VM_FAULT_ERROR;
+
+       return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       struct ion_vma_list *vma_list;
+
+       vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+       if (!vma_list)
+               return;
+       vma_list->vma = vma;
+       mutex_lock(&buffer->lock);
+       list_add(&vma_list->list, &buffer->vmas);
+       mutex_unlock(&buffer->lock);
+       pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = vma->vm_private_data;
+       struct ion_vma_list *vma_list, *tmp;
+
+       pr_debug("%s\n", __func__);
+       mutex_lock(&buffer->lock);
+       list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+               if (vma_list->vma != vma)
+                       continue;
+               list_del(&vma_list->list);
+               kfree(vma_list);
+               pr_debug("%s: deleting %p\n", __func__, vma);
+               break;
+       }
+       mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+       .open = ion_vm_open,
+       .close = ion_vm_close,
+       .fault = ion_vm_fault,
+};
+
+/*
+ * dma-buf mmap callback.
+ *
+ * Buffers flagged for faulted user mappings get an empty PFN-mapped VMA
+ * that ion_vm_fault() populates lazily; everything else is mapped
+ * eagerly through the heap's map_user op.  Uncached buffers are mapped
+ * write-combined.  Returns 0 or a negative errno.
+ */
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       int ret = 0;
+
+       if (!buffer->heap->ops->map_user) {
+               pr_err("%s: this heap does not define a method for mapping "
+                      "to userspace\n", __func__);
+               return -EINVAL;
+       }
+
+       if (ion_buffer_fault_user_mappings(buffer)) {
+               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+                                                       VM_DONTDUMP;
+               vma->vm_private_data = buffer;
+               vma->vm_ops = &ion_vma_ops;
+               /* register this VMA on buffer->vmas for later tracking */
+               ion_vm_open(vma);
+               return 0;
+       }
+
+       if (!(buffer->flags & ION_FLAG_CACHED))
+               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+       mutex_lock(&buffer->lock);
+       /* now map it to userspace */
+       ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+       mutex_unlock(&buffer->lock);
+
+       if (ret)
+               pr_err("%s: failure mapping buffer to userspace\n",
+                      __func__);
+
+       return ret;
+}
+
+/* dma-buf release: drop the buffer reference taken in ion_share_dma_buf(). */
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+       ion_buffer_put(dmabuf->priv);
+}
+
+/*
+ * kmap/kmap_atomic callback: return a pointer into the kernel mapping
+ * cached in buffer->vaddr.  NOTE(review): vaddr is not checked here --
+ * this assumes callers always bracket kmap with begin/end_cpu_access
+ * (which set it up via ion_buffer_kmap_get); confirm.
+ */
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+/*
+ * kunmap/kunmap_atomic callback: nothing to undo.  ion_dma_buf_kmap()
+ * hands out offsets into the persistent kernel mapping, so there is no
+ * per-call mapping to tear down.
+ */
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+                              void *ptr)
+{
+}
+
+/*
+ * Create (or take another reference on) a kernel mapping for the buffer
+ * so the kmap callbacks can return vaddr-relative pointers.  Fails with
+ * -ENODEV if the heap has no map_kernel op.
+ */
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+                                       size_t len,
+                                       enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+       void *vaddr;
+
+       if (!buffer->heap->ops->map_kernel) {
+               pr_err("%s: map kernel is not implemented by this heap.\n",
+                      __func__);
+               return -ENODEV;
+       }
+
+       mutex_lock(&buffer->lock);
+       vaddr = ion_buffer_kmap_get(buffer);
+       mutex_unlock(&buffer->lock);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+       return 0;
+}
+
+/* Drop the kernel-mapping reference taken by begin_cpu_access. */
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+                                      size_t len,
+                                      enum dma_data_direction direction)
+{
+       struct ion_buffer *buffer = dmabuf->priv;
+
+       mutex_lock(&buffer->lock);
+       ion_buffer_kmap_put(buffer);
+       mutex_unlock(&buffer->lock);
+}
+
+/*
+ * dma-buf exporter ops for ion buffers.  The address of this table also
+ * identifies ion-exported dmabufs (see ion_import_dma_buf /
+ * ion_sync_for_device).  Constified: dma_buf_export() takes a
+ * const struct dma_buf_ops *.
+ */
+static const struct dma_buf_ops dma_buf_ops = {
+       .map_dma_buf = ion_map_dma_buf,
+       .unmap_dma_buf = ion_unmap_dma_buf,
+       .mmap = ion_mmap,
+       .release = ion_dma_buf_release,
+       .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+       .end_cpu_access = ion_dma_buf_end_cpu_access,
+       .kmap_atomic = ion_dma_buf_kmap,
+       .kunmap_atomic = ion_dma_buf_kunmap,
+       .kmap = ion_dma_buf_kmap,
+       .kunmap = ion_dma_buf_kunmap,
+};
+
+/*
+ * Export @handle's buffer as a dma-buf.  Takes a buffer reference that
+ * is released either here on export failure or later by the dmabuf's
+ * release callback.  Returns the dmabuf or an ERR_PTR.
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+                                               struct ion_handle *handle)
+{
+       struct ion_buffer *buffer;
+       struct dma_buf *dmabuf;
+
+       mutex_lock(&client->lock);
+       if (!ion_handle_validate(client, handle)) {
+               WARN(1, "%s: invalid handle passed to share.\n", __func__);
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = handle->buffer;
+       ion_buffer_get(buffer);
+       mutex_unlock(&client->lock);
+
+       dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+       if (IS_ERR(dmabuf))
+               ion_buffer_put(buffer);
+
+       return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+/*
+ * Export @handle as a dma-buf and install it into a new O_CLOEXEC file
+ * descriptor.  On fd-installation failure the dmabuf reference is
+ * dropped.  Returns the fd or a negative errno.
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+       struct dma_buf *dmabuf;
+       int fd;
+
+       dmabuf = ion_share_dma_buf(client, handle);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+       if (fd < 0)
+               dma_buf_put(dmabuf);
+
+       return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+/*
+ * Turn a dma-buf fd exported by ion back into an ion_handle for @client.
+ * Only dmabufs created by this exporter (ops == &dma_buf_ops) are
+ * accepted.  If the client already holds a handle for the underlying
+ * buffer, its refcount is bumped instead of creating a duplicate.
+ *
+ * NOTE(review): client->lock is dropped between the failed lookup and
+ * ion_handle_add(), so two concurrent imports of the same buffer could
+ * race and create duplicate handles -- confirm callers serialize, or
+ * that duplicates are harmless here.
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+       struct dma_buf *dmabuf;
+       struct ion_buffer *buffer;
+       struct ion_handle *handle;
+       int ret;
+
+       dmabuf = dma_buf_get(fd);
+       if (IS_ERR(dmabuf))
+               return ERR_PTR(PTR_ERR(dmabuf));
+       /* if this memory came from ion */
+
+       if (dmabuf->ops != &dma_buf_ops) {
+               pr_err("%s: can not import dmabuf from another exporter\n",
+                      __func__);
+               dma_buf_put(dmabuf);
+               return ERR_PTR(-EINVAL);
+       }
+       buffer = dmabuf->priv;
+
+       mutex_lock(&client->lock);
+       /* if a handle exists for this buffer just take a reference to it */
+       handle = ion_handle_lookup(client, buffer);
+       if (!IS_ERR(handle)) {
+               ion_handle_get(handle);
+               mutex_unlock(&client->lock);
+               goto end;
+       }
+       mutex_unlock(&client->lock);
+
+       handle = ion_handle_create(client, buffer);
+       if (IS_ERR(handle))
+               goto end;
+
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
+
+end:
+       dma_buf_put(dmabuf);
+       return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+/*
+ * ION_IOC_SYNC backend: sync the whole buffer's scatterlist for device
+ * access.  Only buffers exported by this driver are accepted.
+ * NOTE(review): a NULL device is passed to dma_sync_sg_for_device() --
+ * this relies on the arch DMA ops tolerating dev == NULL; confirm for
+ * the targeted architectures.
+ */
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+       struct dma_buf *dmabuf;
+       struct ion_buffer *buffer;
+
+       dmabuf = dma_buf_get(fd);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       /* if this memory came from ion */
+       if (dmabuf->ops != &dma_buf_ops) {
+               pr_err("%s: can not sync dmabuf from another exporter\n",
+                      __func__);
+               dma_buf_put(dmabuf);
+               return -EINVAL;
+       }
+       buffer = dmabuf->priv;
+
+       dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+                              buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+       dma_buf_put(dmabuf);
+       return 0;
+}
+
+/*
+ * Some ion commands encode the wrong direction bits in their ioctl
+ * number; treat SYNC, FREE and CUSTOM as pure writes and trust the
+ * encoding for everything else.
+ */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+       if (cmd == ION_IOC_SYNC || cmd == ION_IOC_FREE ||
+           cmd == ION_IOC_CUSTOM)
+               return _IOC_WRITE;
+
+       return _IOC_DIR(cmd);
+}
+
+/*
+ * Main ioctl dispatcher for /dev/ion.
+ *
+ * Arguments are staged through a stack union sized for the largest
+ * command; ion_ioctl_dir() patches up commands whose encoded direction
+ * bits are wrong.  For ION_IOC_ALLOC the new handle is remembered in
+ * cleanup_handle so it can be freed again if the final copy_to_user()
+ * fails and userspace never learns its id.
+ */
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct ion_client *client = filp->private_data;
+       struct ion_device *dev = client->dev;
+       struct ion_handle *cleanup_handle = NULL;
+       int ret = 0;
+       unsigned int dir;
+
+       /* argument buffer, large enough for any supported command */
+       union {
+               struct ion_fd_data fd;
+               struct ion_allocation_data allocation;
+               struct ion_handle_data handle;
+               struct ion_custom_data custom;
+       } data;
+
+       dir = ion_ioctl_dir(cmd);
+
+       if (_IOC_SIZE(cmd) > sizeof(data))
+               return -EINVAL;
+
+       /* copy in the argument block for commands that write to the kernel */
+       if (dir & _IOC_WRITE)
+               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+
+       switch (cmd) {
+       case ION_IOC_ALLOC:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_alloc(client, data.allocation.len,
+                                               data.allocation.align,
+                                               data.allocation.heap_id_mask,
+                                               data.allocation.flags);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+
+               data.allocation.handle = handle->id;
+
+               /* freed again if the copy-out below fails */
+               cleanup_handle = handle;
+               break;
+       }
+       case ION_IOC_FREE:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               ion_free(client, handle);
+               ion_handle_put(handle);
+               break;
+       }
+       case ION_IOC_SHARE:
+       case ION_IOC_MAP:
+       {
+               struct ion_handle *handle;
+
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               data.fd.fd = ion_share_dma_buf_fd(client, handle);
+               ion_handle_put(handle);
+               if (data.fd.fd < 0)
+                       ret = data.fd.fd;
+               break;
+       }
+       case ION_IOC_IMPORT:
+       {
+               struct ion_handle *handle;
+               handle = ion_import_dma_buf(client, data.fd.fd);
+               if (IS_ERR(handle))
+                       ret = PTR_ERR(handle);
+               else
+                       data.handle.handle = handle->id;
+               break;
+       }
+       case ION_IOC_SYNC:
+       {
+               ret = ion_sync_for_device(client, data.fd.fd);
+               break;
+       }
+       case ION_IOC_CUSTOM:
+       {
+               if (!dev->custom_ioctl)
+                       return -ENOTTY;
+               ret = dev->custom_ioctl(client, data.custom.cmd,
+                                               data.custom.arg);
+               break;
+       }
+       default:
+               return -ENOTTY;
+       }
+
+       /* copy the (possibly updated) argument block back to userspace */
+       if (dir & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+                       if (cleanup_handle)
+                               ion_free(client, cleanup_handle);
+                       return -EFAULT;
+               }
+       }
+       return ret;
+}
+
+/* Release callback for /dev/ion: tear down the client made at open(). */
+static int ion_release(struct inode *inode, struct file *file)
+{
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       ion_client_destroy(file->private_data);
+       return 0;
+}
+
+/* Open callback for /dev/ion: create an ion client named "user". */
+static int ion_open(struct inode *inode, struct file *file)
+{
+       struct miscdevice *misc = file->private_data;
+       struct ion_device *idev = container_of(misc, struct ion_device, dev);
+       struct ion_client *client;
+
+       pr_debug("%s: %d\n", __func__, __LINE__);
+       client = ion_client_create(idev, "user");
+       if (IS_ERR(client))
+               return PTR_ERR(client);
+
+       file->private_data = client;
+       return 0;
+}
+
+/* File operations for the /dev/ion misc device. */
+static const struct file_operations ion_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ion_open,
+       .release        = ion_release,
+       .unlocked_ioctl = ion_ioctl,
+       .compat_ioctl   = compat_ion_ioctl,
+};
+
+/* Sum the sizes of all of @client's handles whose buffer is on heap @id. */
+static size_t ion_debug_heap_total(struct ion_client *client,
+                                  unsigned int id)
+{
+       struct rb_node *pos;
+       size_t total = 0;
+
+       mutex_lock(&client->lock);
+       for (pos = rb_first(&client->handles); pos; pos = rb_next(pos)) {
+               struct ion_handle *entry =
+                       rb_entry(pos, struct ion_handle, node);
+
+               if (entry->buffer->heap->id == id)
+                       total += entry->buffer->size;
+       }
+       mutex_unlock(&client->lock);
+       return total;
+}
+
+/*
+ * Per-heap debugfs show: dump per-client usage of this heap, then all
+ * orphaned buffers (handle_count == 0 but still referenced elsewhere),
+ * plus totals and the deferred-free backlog.
+ *
+ * Fix: the original used "%16.s" -- an empty precision means precision
+ * zero, which truncates every string argument to nothing, so client and
+ * task names never actually printed.  Use plain "%16s".
+ *
+ * NOTE(review): dev->clients is walked without holding dev->lock --
+ * confirm whether this can race client creation/teardown.
+ */
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+       struct ion_heap *heap = s->private;
+       struct ion_device *dev = heap->dev;
+       struct rb_node *n;
+       size_t total_size = 0;
+       size_t total_orphaned_size = 0;
+
+       seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
+       seq_printf(s, "----------------------------------------------------\n");
+
+       for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+               struct ion_client *client = rb_entry(n, struct ion_client,
+                                                    node);
+               size_t size = ion_debug_heap_total(client, heap->id);
+               if (!size)
+                       continue;
+               if (client->task) {
+                       char task_comm[TASK_COMM_LEN];
+
+                       get_task_comm(task_comm, client->task);
+                       seq_printf(s, "%16s %16u %16zu\n", task_comm,
+                                  client->pid, size);
+               } else {
+                       seq_printf(s, "%16s %16u %16zu\n", client->name,
+                                  client->pid, size);
+               }
+       }
+       seq_printf(s, "----------------------------------------------------\n");
+       seq_printf(s, "orphaned allocations (info is from last known client):"
+                  "\n");
+       mutex_lock(&dev->buffer_lock);
+       for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+               struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+                                                    node);
+               if (buffer->heap->id != heap->id)
+                       continue;
+               total_size += buffer->size;
+               if (!buffer->handle_count) {
+                       seq_printf(s, "%16s %16u %16zu %d %d\n",
+                                  buffer->task_comm, buffer->pid,
+                                  buffer->size, buffer->kmap_cnt,
+                                  atomic_read(&buffer->ref.refcount));
+                       total_orphaned_size += buffer->size;
+               }
+       }
+       mutex_unlock(&dev->buffer_lock);
+       seq_printf(s, "----------------------------------------------------\n");
+       seq_printf(s, "%16s %16zu\n", "total orphaned",
+                  total_orphaned_size);
+       seq_printf(s, "%16s %16zu\n", "total ", total_size);
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               seq_printf(s, "%16s %16zu\n", "deferred free",
+                               heap->free_list_size);
+       seq_printf(s, "----------------------------------------------------\n");
+
+       if (heap->debug_show)
+               heap->debug_show(heap, s, unused);
+
+       return 0;
+}
+
+/* debugfs open: bind ion_debug_heap_show() to the heap in i_private. */
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+/* seq_file plumbing for the per-heap debugfs entry. */
+static const struct file_operations debug_heap_fops = {
+       .open = ion_debug_heap_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+/*
+ * debugfs write handler: writing any non-zero value asks the heap's
+ * shrinker how many objects it can free, then tells it to scan and free
+ * that many.  Writing zero is a no-op.
+ */
+static int debug_shrink_set(void *data, u64 val)
+{
+       struct ion_heap *heap = data;
+       struct shrink_control sc;
+       int objs;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       if (!val)
+               return 0;
+
+       /* nr_to_scan == 0 queries the freeable count without freeing */
+       objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+       sc.nr_to_scan = objs;
+
+       heap->shrinker.shrink(&heap->shrinker, &sc);
+       return 0;
+}
+
+/* debugfs read handler: report how many objects the shrinker could free. */
+static int debug_shrink_get(void *data, u64 *val)
+{
+       struct ion_heap *heap = data;
+       struct shrink_control sc;
+       int objs;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+       *val = objs;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+                       debug_shrink_set, "%llu\n");
+#endif
+
+/*
+ * Register @heap with @dev: set up deferred freeing if requested, add
+ * the heap to the device's priority list and create its debugfs file.
+ *
+ * NOTE(review): heaps with missing mandatory ops are only complained
+ * about via pr_err and are still registered -- confirm that is intended.
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+       if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+           !heap->ops->unmap_dma)
+               pr_err("%s: can not add heap with invalid ops struct.\n",
+                      __func__);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_init_deferred_free(heap);
+
+       heap->dev = dev;
+       down_write(&dev->lock);
+       /* use negative heap->id to reverse the priority -- when traversing
+          the list later attempt higher id numbers first */
+       plist_node_init(&heap->node, -heap->id);
+       plist_add(&heap->node, &dev->heaps);
+       debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+                           &debug_heap_fops);
+#ifdef DEBUG_HEAP_SHRINKER
+       if (heap->shrinker.shrink) {
+               char debug_name[64];
+
+               snprintf(debug_name, 64, "%s_shrink", heap->name);
+               debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
+                                   &debug_shrink_fops);
+       }
+#endif
+       up_write(&dev->lock);
+}
+
+/*
+ * Allocate and register the global ion misc device (/dev/ion).
+ * @custom_ioctl: optional backend for ION_IOC_CUSTOM, may be NULL.
+ *
+ * Returns the new device or an ERR_PTR on failure.
+ * Fix: the original leaked @idev when misc_register() failed; free it
+ * before returning the error.
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+                                    (struct ion_client *client,
+                                     unsigned int cmd,
+                                     unsigned long arg))
+{
+       struct ion_device *idev;
+       int ret;
+
+       idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+       if (!idev)
+               return ERR_PTR(-ENOMEM);
+
+       idev->dev.minor = MISC_DYNAMIC_MINOR;
+       idev->dev.name = "ion";
+       idev->dev.fops = &ion_fops;
+       idev->dev.parent = NULL;
+       ret = misc_register(&idev->dev);
+       if (ret) {
+               pr_err("ion: failed to register misc device.\n");
+               kfree(idev);
+               return ERR_PTR(ret);
+       }
+
+       /* debugfs is best-effort: the device works without it */
+       idev->debug_root = debugfs_create_dir("ion", NULL);
+       if (!idev->debug_root)
+               pr_err("ion: failed to create debug files.\n");
+
+       idev->custom_ioctl = custom_ioctl;
+       idev->buffers = RB_ROOT;
+       mutex_init(&idev->buffer_lock);
+       init_rwsem(&idev->lock);
+       plist_head_init(&idev->heaps);
+       idev->clients = RB_ROOT;
+       return idev;
+}
+
+/*
+ * Unregister and free the ion device.  Heaps and clients are NOT freed
+ * here (see the XXX) -- callers must ensure none remain in use.
+ */
+void ion_device_destroy(struct ion_device *dev)
+{
+       misc_deregister(&dev->dev);
+       /* XXX need to free the heaps and clients ? */
+       kfree(dev);
+}
+
+/*
+ * Reserve (or memblock-allocate) physical memory for each platform heap
+ * that asks for it: heaps with size 0 are skipped, heaps with base 0 get
+ * memory allocated anywhere, and heaps with a fixed base reserve that
+ * exact range.  Failures are logged and the heap is skipped.
+ *
+ * NOTE(review): memblock_alloc_base() panics on failure in this kernel,
+ * so the !paddr error path below is likely unreachable -- confirm.
+ */
+void __init ion_reserve(struct ion_platform_data *data)
+{
+       int i;
+
+       for (i = 0; i < data->nr; i++) {
+               if (data->heaps[i].size == 0)
+                       continue;
+
+               if (data->heaps[i].base == 0) {
+                       phys_addr_t paddr;
+                       paddr = memblock_alloc_base(data->heaps[i].size,
+                                                   data->heaps[i].align,
+                                                   MEMBLOCK_ALLOC_ANYWHERE);
+                       if (!paddr) {
+                               pr_err("%s: error allocating memblock for "
+                                      "heap %d\n",
+                                       __func__, i);
+                               continue;
+                       }
+                       data->heaps[i].base = paddr;
+               } else {
+                       int ret = memblock_reserve(data->heaps[i].base,
+                                              data->heaps[i].size);
+                       if (ret)
+                               pr_err("memblock reserve of %zx@%lx failed\n",
+                                      data->heaps[i].size,
+                                      data->heaps[i].base);
+               }
+               pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+                       data->heaps[i].name,
+                       data->heaps[i].base,
+                       data->heaps[i].size);
+       }
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644 (file)
index 0000000..dcd2a0c
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being many kernel interfaces
+   do not accept phys_addr_t's, and those users would have to change. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:      type of the heap from ion_heap_type enum
+ * @id:                unique identifier for heap.  When allocating higher numbers
+ *             will be allocated from first.  At allocation these are passed
+ *             as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name:      used for debug purposes
+ * @base:      base address of heap in physical memory if applicable
+ * @size:      size of the heap in bytes if applicable
+ * @align:     required alignment in physical memory if applicable
+ * @priv:      private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+       enum ion_heap_type type;
+       unsigned int id;
+       const char *name;
+       ion_phys_addr_t base;   /* 0 => ion_reserve() allocates from memblock */
+       size_t size;            /* 0 => heap is skipped by ion_reserve() */
+       ion_phys_addr_t align;
+       void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:                number of structures in the array
+ * @heaps:     array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+       int nr;                         /* number of entries in @heaps */
+       struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:      platform data specifying starting physical address and
+ *             size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() -  allocate and return a client
+ * @dev:               the global ion device
+ * @name:              used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+                                    const char *name);
+
+/**
+ * ion_client_destroy() -  frees a client and all its handles
+ * @client:    the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:            the client
+ * @len:               size of the allocation
+ * @align:             requested allocation alignment, lots of hardware blocks
+ *                     have alignment requirements of some kind
+ * @heap_id_mask:      mask of heaps to allocate from, if multiple bits are set
+ *                     heaps will be tried in order from highest to lowest
+ *                     id
+ * @flags:             heap flags, the low 16 bits are consumed by ion, the
+ *                     high 16 bits are passed on to the respective heap and
+ *                     can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+                            size_t align, unsigned int heap_id_mask,
+                            unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:    the client
+ * @handle:    the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:    the client
+ * @handle:    the handle
+ * @addr:      a pointer to put the address in
+ * @len:       a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+            ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client:    the client
+ * @handle:    the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+                             struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:    the client
+ * @handle:    handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:    the client
+ * @handle:    handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:    the client
+ * @handle:    the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+                                               struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:    the client
+ * @handle:    the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
+ * @client:    the client
+ * @fd:                the dma-buf fd
+ *
+ * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644 (file)
index 0000000..5165de2
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+/* A physically contiguous carveout region managed by a genalloc pool. */
+struct ion_carveout_heap {
+       struct ion_heap heap;
+       struct gen_pool *pool;  /* allocator over the carveout range */
+       ion_phys_addr_t base;   /* physical base of the region */
+};
+
+/*
+ * Carve @size bytes out of the heap's gen_pool.
+ * Returns the physical address, or ION_CARVEOUT_ALLOCATE_FAIL.
+ *
+ * NOTE(review): @align is accepted but never used -- gen_pool_alloc()
+ * takes no alignment argument, so callers only get the pool's
+ * minimum-order alignment.  Confirm that is sufficient.
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+                                     unsigned long size,
+                                     unsigned long align)
+{
+       struct ion_carveout_heap *carveout_heap =
+               container_of(heap, struct ion_carveout_heap, heap);
+       unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+       if (!offset)
+               return ION_CARVEOUT_ALLOCATE_FAIL;
+
+       return offset;
+}
+
+/*
+ * Return a previously allocated range to the carveout pool.  The
+ * ION_CARVEOUT_ALLOCATE_FAIL sentinel is accepted and silently ignored.
+ */
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+                      unsigned long size)
+{
+       struct ion_carveout_heap *carveout_heap;
+
+       if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+               return;
+
+       carveout_heap = container_of(heap, struct ion_carveout_heap, heap);
+       gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+/* Report the physical address and length of a carveout buffer. */
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+                                 struct ion_buffer *buffer,
+                                 ion_phys_addr_t *addr, size_t *len)
+{
+       struct sg_table *table = buffer->priv_virt;
+
+       *addr = PFN_PHYS(page_to_pfn(sg_page(table->sgl)));
+       *len = buffer->size;
+       return 0;
+}
+
+/*
+ * Allocate @size bytes from the carveout and describe the result with a
+ * single-entry sg_table stored in buffer->priv_virt.  Alignment beyond
+ * PAGE_SIZE is rejected (ion_carveout_allocate cannot honour it).
+ */
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long size, unsigned long align,
+                                     unsigned long flags)
+{
+       struct sg_table *table;
+       ion_phys_addr_t paddr;
+       int ret;
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto err_free;
+
+       paddr = ion_carveout_allocate(heap, size, align);
+       if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+               ret = -ENOMEM;
+               goto err_free_table;
+       }
+
+       sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+       buffer->priv_virt = table;
+
+       return 0;
+
+err_free_table:
+       sg_free_table(table);
+err_free:
+       kfree(table);
+       return ret;
+}
+
+/*
+ * Free path: zero the memory before returning it to the pool, write the
+ * zeroes out to RAM for cached buffers, then release the range and the
+ * sg_table built at allocation time.
+ */
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+       ion_heap_buffer_zero(buffer);
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+                                                       DMA_BIDIRECTIONAL);
+
+       ion_carveout_free(heap, paddr, buffer->size);
+       sg_free_table(table);
+       kfree(table);
+}
+
+/* map_dma: hand back the sg_table built in the allocate op. */
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+                                                 struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+/*
+ * unmap_dma is a no-op: the sg_table lives in buffer->priv_virt for the
+ * buffer's whole lifetime and is freed in ion_carveout_heap_free().
+ */
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+                                       struct ion_buffer *buffer)
+{
+}
+
+/*
+ * Carveout memory is physically contiguous, so phys() is supported and
+ * kernel/user mappings use the generic ion_heap helpers.
+ */
+static struct ion_heap_ops carveout_heap_ops = {
+       .allocate = ion_carveout_heap_allocate,
+       .free = ion_carveout_heap_free,
+       .phys = ion_carveout_heap_phys,
+       .map_dma = ion_carveout_heap_map_dma,
+       .unmap_dma = ion_carveout_heap_unmap_dma,
+       .map_user = ion_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+/*
+ * Create a carveout heap over [base, base + size) described by
+ * @heap_data.  The backing pages are synced and zeroed up front; the
+ * gen_pool uses a 2^12 (4KiB) minimum allocation order and frees are
+ * deferred via ION_HEAP_FLAG_DEFER_FREE.  Returns the heap or ERR_PTR.
+ */
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_carveout_heap *carveout_heap;
+       int ret;
+
+       struct page *page;
+       size_t size;
+
+       page = pfn_to_page(PFN_DOWN(heap_data->base));
+       size = heap_data->size;
+
+       ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+       ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+       if (ret)
+               return ERR_PTR(ret);
+
+       carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+       if (!carveout_heap)
+               return ERR_PTR(-ENOMEM);
+
+       carveout_heap->pool = gen_pool_create(12, -1);
+       if (!carveout_heap->pool) {
+               kfree(carveout_heap);
+               return ERR_PTR(-ENOMEM);
+       }
+       carveout_heap->base = heap_data->base;
+       gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+                    -1);
+       carveout_heap->heap.ops = &carveout_heap_ops;
+       carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+       carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+       return &carveout_heap->heap;
+}
+
+/*
+ * Tear down a carveout heap: destroy the pool and free the heap struct.
+ * Dropped the original's dead "carveout_heap = NULL;" -- assigning NULL
+ * to a local on the last line has no effect.
+ */
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_carveout_heap *carveout_heap =
+            container_of(heap, struct ion_carveout_heap, heap);
+
+       gen_pool_destroy(carveout_heap->pool);
+       kfree(carveout_heap);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644 (file)
index 0000000..ca20d62
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * drivers/gpu/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+/* A heap that carves a fixed physical region into equal-sized chunks. */
+struct ion_chunk_heap {
+       struct ion_heap heap;           /* embedded generic ION heap */
+       struct gen_pool *pool;          /* allocator over the chunk region */
+       ion_phys_addr_t base;           /* physical base of the region */
+       unsigned long chunk_size;       /* fixed allocation unit, bytes */
+       unsigned long size;             /* total region size, bytes */
+       unsigned long allocated;        /* bytes currently handed out */
+};
+
+/*
+ * Allocate @size bytes (rounded up to whole chunks) as a scatterlist with one
+ * entry per chunk.  The sg_table is stashed in buffer->priv_virt.
+ * Returns 0 on success or a negative errno.
+ */
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long size, unsigned long align,
+                                     unsigned long flags)
+{
+       struct ion_chunk_heap *chunk_heap =
+               container_of(heap, struct ion_chunk_heap, heap);
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int ret, i;
+       unsigned long num_chunks;
+       unsigned long allocated_size;
+
+       /* Chunks cannot satisfy alignment larger than the chunk itself. */
+       if (align > chunk_heap->chunk_size)
+               return -EINVAL;
+
+       allocated_size = ALIGN(size, chunk_heap->chunk_size);
+       num_chunks = allocated_size / chunk_heap->chunk_size;
+
+       /* Fast-fail if the region cannot possibly hold the request. */
+       if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+               return -ENOMEM;
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+       ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+       if (ret) {
+               kfree(table);
+               return ret;
+       }
+
+       sg = table->sgl;
+       for (i = 0; i < num_chunks; i++) {
+               unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+                                                    chunk_heap->chunk_size);
+               if (!paddr)
+                       goto err;
+               sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+                               chunk_heap->chunk_size, 0);
+               sg = sg_next(sg);
+       }
+
+       buffer->priv_virt = table;
+       chunk_heap->allocated += allocated_size;
+       return 0;
+err:
+       /* Unwind: return the i chunks already taken back to the pool. */
+       sg = table->sgl;
+       for (i -= 1; i >= 0; i--) {
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                             sg->length);
+               sg = sg_next(sg);
+       }
+       sg_free_table(table);
+       kfree(table);
+       return -ENOMEM;
+}
+
+/*
+ * Return a buffer's chunks to the pool.  The memory is zeroed first, and
+ * cached buffers are synced so the zeroes reach memory before reuse.
+ */
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct ion_chunk_heap *chunk_heap =
+               container_of(heap, struct ion_chunk_heap, heap);
+       struct sg_table *table = buffer->priv_virt;
+       struct scatterlist *sg;
+       int i;
+       unsigned long allocated_size;
+
+       allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+       ion_heap_buffer_zero(buffer);
+
+       if (ion_buffer_cached(buffer))
+               dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+                                                               DMA_BIDIRECTIONAL);
+
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+                             sg->length);
+       }
+       chunk_heap->allocated -= allocated_size;
+       sg_free_table(table);
+       kfree(table);
+}
+
+/* The sg_table was built at allocation time; just hand it back. */
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+                                              struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+/* Nothing to undo: the table lives until ion_chunk_heap_free(). */
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+                                    struct ion_buffer *buffer)
+{
+       return;
+}
+
+/* Kernel/user mappings use the generic sg-based helpers from ion_heap.c. */
+static struct ion_heap_ops chunk_heap_ops = {
+       .allocate = ion_chunk_heap_allocate,
+       .free = ion_chunk_heap_free,
+       .map_dma = ion_chunk_heap_map_dma,
+       .unmap_dma = ion_chunk_heap_unmap_dma,
+       .map_user = ion_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+/**
+ * ion_chunk_heap_create() - create a chunk heap over a fixed region
+ * @heap_data:	platform data; ->priv carries the chunk size as an integer
+ *
+ * Returns the new heap, or an ERR_PTR on failure.
+ */
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_chunk_heap *chunk_heap;
+       int ret;
+       struct page *page;
+       size_t size;
+
+       page = pfn_to_page(PFN_DOWN(heap_data->base));
+       size = heap_data->size;
+
+       ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+       /* Scrub the region before any buffer can be handed out. */
+       ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+       if (ret)
+               return ERR_PTR(ret);
+
+       chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+       if (!chunk_heap)
+               return ERR_PTR(-ENOMEM);
+
+       /* ->priv smuggles the chunk size; pool granularity matches it. */
+       chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+       chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+                                          PAGE_SHIFT, -1);
+       if (!chunk_heap->pool) {
+               ret = -ENOMEM;
+               goto error_gen_pool_create;
+       }
+       chunk_heap->base = heap_data->base;
+       chunk_heap->size = heap_data->size;
+       chunk_heap->allocated = 0;
+
+       gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+       chunk_heap->heap.ops = &chunk_heap_ops;
+       chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+       chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+               heap_data->size, heap_data->align);
+
+       return &chunk_heap->heap;
+
+error_gen_pool_create:
+       kfree(chunk_heap);
+       return ERR_PTR(ret);
+}
+
+/**
+ * ion_chunk_heap_destroy() - tear down a chunk heap
+ * @heap:	heap previously returned by ion_chunk_heap_create()
+ *
+ * The caller must ensure no buffers are outstanding on the heap.
+ */
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_chunk_heap *chunk_heap =
+            container_of(heap, struct ion_chunk_heap, heap);
+
+       gen_pool_destroy(chunk_heap->pool);
+       kfree(chunk_heap);
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644 (file)
index 0000000..4418bda
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+/* Legacy failure code; note -1 aliases -EPERM as an errno. */
+#define ION_CMA_ALLOCATE_FAILED -1
+
+/* A heap backed by a per-device CMA (contiguous memory allocator) region. */
+struct ion_cma_heap {
+       struct ion_heap heap;   /* embedded generic ION heap */
+       struct device *dev;     /* device whose CMA region backs allocations */
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+/* Per-buffer bookkeeping kept in buffer->priv_virt. */
+struct ion_cma_buffer_info {
+       void *cpu_addr;         /* kernel virtual address of the allocation */
+       dma_addr_t handle;      /* DMA/bus address returned by the DMA API */
+       struct sg_table *table; /* single-entry sg table covering the buffer */
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it will avalaible.
+ */
+/*
+ * Build a one-entry sg table describing an already-allocated DMA buffer.
+ * NOTE(review): virt_to_page() on a dma_alloc_coherent() address is not
+ * valid on every platform (e.g. remapped/highmem returns) — confirm the
+ * targets this heap is enabled on; dma_common_get_sgtable is the eventual
+ * replacement, as the comment above notes.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                              void *cpu_addr, dma_addr_t handle, size_t size)
+{
+       struct page *page = virt_to_page(cpu_addr);
+       int ret;
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (unlikely(ret))
+               return ret;
+
+       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return 0;
+}
+
+/* ION CMA heap operations functions */
+/**
+ * ion_cma_allocate() - allocate a physically contiguous buffer from CMA
+ * @heap:	the CMA heap
+ * @buffer:	ion buffer; on success ->priv_virt holds the bookkeeping info
+ * @len:	requested size in bytes
+ * @align:	required alignment; anything above PAGE_SIZE is rejected
+ * @flags:	buffer flags; cached buffers are not supported by this heap
+ *
+ * Returns 0 on success, -EINVAL for unsupported requests, -ENOMEM on
+ * allocation failure.  (Previously returned ION_CMA_ALLOCATE_FAILED, i.e.
+ * -1 == -EPERM, which is a misleading errno for an out-of-memory path.)
+ */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+                           unsigned long len, unsigned long align,
+                           unsigned long flags)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info;
+
+       dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+       /* Coherent DMA memory is uncached; refuse cached mappings. */
+       if (buffer->flags & ION_FLAG_CACHED)
+               return -EINVAL;
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+       if (!info) {
+               dev_err(dev, "Can't allocate buffer info\n");
+               return -ENOMEM;
+       }
+
+       info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+                                               GFP_HIGHUSER | __GFP_ZERO);
+
+       if (!info->cpu_addr) {
+               dev_err(dev, "Fail to allocate buffer\n");
+               goto err;
+       }
+
+       info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!info->table) {
+               dev_err(dev, "Fail to allocate sg table\n");
+               goto free_mem;
+       }
+
+       if (ion_cma_get_sgtable
+           (dev, info->table, info->cpu_addr, info->handle, len))
+               goto free_table;
+       /* keep this for memory release */
+       buffer->priv_virt = info;
+       dev_dbg(dev, "Allocate buffer %p\n", buffer);
+       return 0;
+
+free_table:
+       kfree(info->table);
+free_mem:
+       dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+       kfree(info);
+       return -ENOMEM;
+}
+
+/* Release the DMA memory, the sg table, and the per-buffer info. */
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       dev_dbg(dev, "Release buffer %p\n", buffer);
+       /* release memory */
+       dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+       /* release sg table */
+       sg_free_table(info->table);
+       kfree(info->table);
+       kfree(info);
+}
+
+/*
+ * return physical address in addr
+ * Note this reports the DMA handle, which equals the physical address
+ * only when the device has no IOMMU in front of it.
+ */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+                       ion_phys_addr_t *addr, size_t *len)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+               &info->handle);
+
+       *addr = info->handle;
+       *len = buffer->size;
+
+       return 0;
+}
+
+/* The sg table was built at allocation time; just hand it back. */
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+                                            struct ion_buffer *buffer)
+{
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       return info->table;
+}
+
+/* Nothing to undo: the table lives until ion_cma_free(). */
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+                                  struct ion_buffer *buffer)
+{
+       return;
+}
+
+/* Map the whole coherent buffer into userspace via the DMA API. */
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+                       struct vm_area_struct *vma)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+       struct device *dev = cma_heap->dev;
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+       return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+                                buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+                               struct ion_buffer *buffer)
+{
+       struct ion_cma_buffer_info *info = buffer->priv_virt;
+       /* kernel memory mapping has been done at allocation time */
+       return info->cpu_addr;
+}
+
+/* No-op: the kernel mapping persists until the buffer is freed. */
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+                                       struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+       .allocate = ion_cma_allocate,
+       .free = ion_cma_free,
+       .map_dma = ion_cma_heap_map_dma,
+       .unmap_dma = ion_cma_heap_unmap_dma,
+       .phys = ion_cma_phys,
+       .map_user = ion_cma_mmap,
+       .map_kernel = ion_cma_map_kernel,
+       .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+/**
+ * ion_cma_heap_create() - create a heap backed by a device's CMA region
+ * @data:	platform data; ->priv carries the struct device pointer
+ *
+ * Returns the new heap or ERR_PTR(-ENOMEM).
+ */
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+       struct ion_cma_heap *cma_heap;
+
+       cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+       if (!cma_heap)
+               return ERR_PTR(-ENOMEM);
+
+       cma_heap->heap.ops = &ion_cma_ops;
+       /* get device from private heaps data, later it will be
+        * used to make the link with reserved CMA memory */
+       cma_heap->dev = data->priv;
+       cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+       return &cma_heap->heap;
+}
+
+/* Free the heap wrapper; the CMA region itself belongs to the device. */
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+       kfree(cma_heap);
+}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644 (file)
index 0000000..5b01e9e
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * drivers/gpu/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+/*
+ * Map an sg-backed buffer into a contiguous kernel virtual range with vmap().
+ * Cached buffers get normal kernel protections, others write-combine.
+ * NOTE(review): returns NULL when the temporary page array cannot be
+ * allocated but ERR_PTR(-ENOMEM) when vmap() fails — callers must handle
+ * both; consider unifying on ERR_PTR.
+ */
+void *ion_heap_map_kernel(struct ion_heap *heap,
+                         struct ion_buffer *buffer)
+{
+       struct scatterlist *sg;
+       int i, j;
+       void *vaddr;
+       pgprot_t pgprot;
+       struct sg_table *table = buffer->sg_table;
+       int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+       struct page **pages = vmalloc(sizeof(struct page *) * npages);
+       struct page **tmp = pages;
+
+       if (!pages)
+               return NULL;
+
+       if (buffer->flags & ION_FLAG_CACHED)
+               pgprot = PAGE_KERNEL;
+       else
+               pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+       /* Flatten the scatterlist into a dense page array for vmap(). */
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+               struct page *page = sg_page(sg);
+               BUG_ON(i >= npages);
+               for (j = 0; j < npages_this_entry; j++)
+                       *(tmp++) = page++;
+       }
+       vaddr = vmap(pages, npages, VM_MAP, pgprot);
+       vfree(pages);
+
+       if (vaddr == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       return vaddr;
+}
+
+/* Undo ion_heap_map_kernel(): drop the vmap mapping. */
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+                          struct ion_buffer *buffer)
+{
+       vunmap(buffer->vaddr);
+}
+
+/*
+ * Map an sg-backed buffer into a userspace VMA, honouring vm_pgoff as a
+ * byte offset into the buffer.  Each sg entry is mapped with
+ * remap_pfn_range(), clipped to the remaining VMA length.
+ */
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+                     struct vm_area_struct *vma)
+{
+       struct sg_table *table = buffer->sg_table;
+       unsigned long addr = vma->vm_start;
+       unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+       struct scatterlist *sg;
+       int i;
+       int ret;
+
+       for_each_sg(table->sgl, sg, table->nents, i) {
+               struct page *page = sg_page(sg);
+               unsigned long remainder = vma->vm_end - addr;
+               unsigned long len = sg->length;
+
+               /* Skip whole entries that lie before the requested offset. */
+               if (offset >= sg->length) {
+                       offset -= sg->length;
+                       continue;
+               } else if (offset) {
+                       /* First mapped entry may start mid-way through. */
+                       page += offset / PAGE_SIZE;
+                       len = sg->length - offset;
+                       offset = 0;
+               }
+               len = min(len, remainder);
+               ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+                               vma->vm_page_prot);
+               if (ret)
+                       return ret;
+               addr += len;
+               if (addr >= vma->vm_end)
+                       return 0;
+       }
+       return 0;
+}
+
+/* Temporarily map @num pages with vm_map_ram() and zero them. */
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+       void *addr = vm_map_ram(pages, num, -1, pgprot);
+       if (!addr)
+               return -ENOMEM;
+       memset(addr, 0, PAGE_SIZE * num);
+       vm_unmap_ram(addr, num);
+
+       return 0;
+}
+
+/*
+ * Zero every page of a scatterlist, batching 32 pages per temporary
+ * mapping to bound vm_map_ram() usage.
+ */
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+                                               pgprot_t pgprot)
+{
+       int p = 0;
+       int ret = 0;
+       struct sg_page_iter piter;
+       struct page *pages[32];
+
+       for_each_sg_page(sgl, &piter, nents, 0) {
+               pages[p++] = sg_page_iter_page(&piter);
+               if (p == ARRAY_SIZE(pages)) {
+                       ret = ion_heap_clear_pages(pages, p, pgprot);
+                       if (ret)
+                               return ret;
+                       p = 0;
+               }
+       }
+       /* Flush the final partial batch, if any. */
+       if (p)
+               ret = ion_heap_clear_pages(pages, p, pgprot);
+
+       return ret;
+}
+
+/*
+ * Zero an entire buffer through a mapping that matches its cacheability,
+ * so the zeroes are visible consistently with later client mappings.
+ */
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+       struct sg_table *table = buffer->sg_table;
+       pgprot_t pgprot;
+
+       if (buffer->flags & ION_FLAG_CACHED)
+               pgprot = PAGE_KERNEL;
+       else
+               pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+       return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+/* Zero a single physically contiguous run by wrapping it in a one-entry sg. */
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+       struct scatterlist sg;
+
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+/* Queue a buffer for deferred destruction and wake the free thread. */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+       rt_mutex_lock(&heap->lock);
+       list_add(&buffer->list, &heap->free_list);
+       heap->free_list_size += buffer->size;
+       rt_mutex_unlock(&heap->lock);
+       wake_up(&heap->waitqueue);
+}
+
+/* Snapshot of the bytes currently queued for deferred free. */
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+       size_t size;
+
+       rt_mutex_lock(&heap->lock);
+       size = heap->free_list_size;
+       rt_mutex_unlock(&heap->lock);
+
+       return size;
+}
+
+/*
+ * Synchronously destroy up to @size bytes of queued buffers (all of them
+ * when @size is 0).  Returns the number of bytes actually drained.
+ * NOTE(review): ion_buffer_destroy() is called with heap->lock held here,
+ * unlike the deferred-free thread which drops the lock first — confirm
+ * destroy paths cannot re-enter the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+       struct ion_buffer *buffer, *tmp;
+       size_t total_drained = 0;
+
+       if (ion_heap_freelist_size(heap) == 0)
+               return 0;
+
+       rt_mutex_lock(&heap->lock);
+       if (size == 0)
+               size = heap->free_list_size;
+
+       list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+               if (total_drained >= size)
+                       break;
+               list_del(&buffer->list);
+               heap->free_list_size -= buffer->size;
+               total_drained += buffer->size;
+               ion_buffer_destroy(buffer);
+       }
+       rt_mutex_unlock(&heap->lock);
+
+       return total_drained;
+}
+
+/*
+ * Kthread body: sleep until the freelist is non-empty, then destroy one
+ * buffer at a time, dropping heap->lock across the (potentially slow)
+ * ion_buffer_destroy() call.
+ */
+static int ion_heap_deferred_free(void *data)
+{
+       struct ion_heap *heap = data;
+
+       while (true) {
+               struct ion_buffer *buffer;
+
+               wait_event_freezable(heap->waitqueue,
+                                    ion_heap_freelist_size(heap) > 0);
+
+               rt_mutex_lock(&heap->lock);
+               /* A drain may have emptied the list between wakeup and lock. */
+               if (list_empty(&heap->free_list)) {
+                       rt_mutex_unlock(&heap->lock);
+                       continue;
+               }
+               buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+                                         list);
+               list_del(&buffer->list);
+               heap->free_list_size -= buffer->size;
+               rt_mutex_unlock(&heap->lock);
+               ion_buffer_destroy(buffer);
+       }
+
+       return 0;
+}
+
+/**
+ * ion_heap_init_deferred_free() - set up the deferred-free worker for a heap
+ * @heap:	heap to attach the freelist and kthread to
+ *
+ * Returns 0 on success or the kthread_run() error code.
+ *
+ * Fix: the original called sched_setscheduler() on heap->task *before*
+ * checking IS_ERR(), so a failed kthread_run() handed an ERR_PTR to the
+ * scheduler.  Check for failure first, then lower the thread's priority.
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+       struct sched_param param = { .sched_priority = 0 };
+
+       INIT_LIST_HEAD(&heap->free_list);
+       heap->free_list_size = 0;
+       rt_mutex_init(&heap->lock);
+       init_waitqueue_head(&heap->waitqueue);
+       heap->task = kthread_run(ion_heap_deferred_free, heap,
+                                "%s", heap->name);
+       if (IS_ERR(heap->task)) {
+               pr_err("%s: creating thread for deferred free failed\n",
+                      __func__);
+               return PTR_ERR(heap->task);
+       }
+       /* Freeing is background work; run at the lowest priority. */
+       sched_setscheduler(heap->task, SCHED_IDLE, &param);
+       return 0;
+}
+
+/*
+ * Dispatch heap creation to the per-type constructor and fill in the
+ * common name/id fields.
+ * NOTE(review): constructor errors are collapsed to ERR_PTR(-EINVAL),
+ * discarding the original errno — consider propagating it.
+ */
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+       struct ion_heap *heap = NULL;
+
+       switch (heap_data->type) {
+       case ION_HEAP_TYPE_SYSTEM_CONTIG:
+               heap = ion_system_contig_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_SYSTEM:
+               heap = ion_system_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_CARVEOUT:
+               heap = ion_carveout_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_CHUNK:
+               heap = ion_chunk_heap_create(heap_data);
+               break;
+       case ION_HEAP_TYPE_DMA:
+               heap = ion_cma_heap_create(heap_data);
+               break;
+       default:
+               pr_err("%s: Invalid heap type %d\n", __func__,
+                      heap_data->type);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (IS_ERR_OR_NULL(heap)) {
+               pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+                      __func__, heap_data->name, heap_data->type,
+                      heap_data->base, heap_data->size);
+               return ERR_PTR(-EINVAL);
+       }
+
+       heap->name = heap_data->name;
+       heap->id = heap_data->id;
+       return heap;
+}
+
+/* Dispatch heap teardown to the per-type destructor; NULL is a no-op. */
+void ion_heap_destroy(struct ion_heap *heap)
+{
+       if (!heap)
+               return;
+
+       switch (heap->type) {
+       case ION_HEAP_TYPE_SYSTEM_CONTIG:
+               ion_system_contig_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_SYSTEM:
+               ion_system_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_CARVEOUT:
+               ion_carveout_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_CHUNK:
+               ion_chunk_heap_destroy(heap);
+               break;
+       case ION_HEAP_TYPE_DMA:
+               ion_cma_heap_destroy(heap);
+               break;
+       default:
+               pr_err("%s: Invalid heap type %d\n", __func__,
+                      heap->type);
+       }
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644 (file)
index 0000000..f087a02
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * drivers/gpu/ion/ion_mem_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+/* List node wrapping one cached page-run of 2^order pages. */
+struct ion_page_pool_item {
+       struct page *page;
+       struct list_head list;
+};
+
+/* Allocate a fresh 2^order run from the buddy and sync it for device. */
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+       struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+       if (!page)
+               return NULL;
+       ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+                                               DMA_BIDIRECTIONAL);
+       return page;
+}
+
+/* Give a run back to the buddy allocator. */
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+                                    struct page *page)
+{
+       __free_pages(page, pool->order);
+}
+
+/*
+ * Cache a page run in the pool, segregated by highmem/lowmem.
+ * Returns -ENOMEM if the tracking node cannot be allocated (caller then
+ * frees the page outright).
+ */
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+       struct ion_page_pool_item *item;
+
+       item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+       if (!item)
+               return -ENOMEM;
+
+       mutex_lock(&pool->mutex);
+       item->page = page;
+       if (PageHighMem(page)) {
+               list_add_tail(&item->list, &pool->high_items);
+               pool->high_count++;
+       } else {
+               list_add_tail(&item->list, &pool->low_items);
+               pool->low_count++;
+       }
+       mutex_unlock(&pool->mutex);
+       return 0;
+}
+
+/*
+ * Pop one cached run from the requested list.  Caller must hold
+ * pool->mutex and must have checked the corresponding count is non-zero.
+ */
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+       struct ion_page_pool_item *item;
+       struct page *page;
+
+       if (high) {
+               BUG_ON(!pool->high_count);
+               item = list_first_entry(&pool->high_items,
+                                       struct ion_page_pool_item, list);
+               pool->high_count--;
+       } else {
+               BUG_ON(!pool->low_count);
+               item = list_first_entry(&pool->low_items,
+                                       struct ion_page_pool_item, list);
+               pool->low_count--;
+       }
+
+       list_del(&item->list);
+       page = item->page;
+       kfree(item);
+       return page;
+}
+
+/*
+ * Get a page run: prefer cached entries (highmem first), fall back to a
+ * fresh buddy allocation.  Returns NULL only if the buddy also fails.
+ */
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+       struct page *page = NULL;
+
+       BUG_ON(!pool);
+
+       mutex_lock(&pool->mutex);
+       if (pool->high_count)
+               page = ion_page_pool_remove(pool, true);
+       else if (pool->low_count)
+               page = ion_page_pool_remove(pool, false);
+       mutex_unlock(&pool->mutex);
+
+       if (!page)
+               page = ion_page_pool_alloc_pages(pool);
+
+       return page;
+}
+
+/* Return a run to the pool; free it outright if caching fails. */
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+       int ret;
+
+       ret = ion_page_pool_add(pool, page);
+       if (ret)
+               ion_page_pool_free_pages(pool, page);
+}
+
+/*
+ * Count the pages held by the pool.  Highmem entries are included only
+ * when @high is set; lowmem entries always count.
+ */
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+       int count = pool->low_count;
+
+       if (high)
+               count += pool->high_count;
+
+       return count << pool->order;
+}
+
+/*
+ * Shrinker hook: with nr_to_scan == 0 report the reclaimable page count,
+ * otherwise free up to nr_to_scan cached runs (lowmem first; highmem only
+ * when the gfp mask permits).  Returns pages freed.
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+                               int nr_to_scan)
+{
+       int nr_freed = 0;
+       int i;
+       bool high;
+
+       high = !!(gfp_mask & __GFP_HIGHMEM);
+
+       if (nr_to_scan == 0)
+               return ion_page_pool_total(pool, high);
+
+       for (i = 0; i < nr_to_scan; i++) {
+               struct page *page;
+
+               mutex_lock(&pool->mutex);
+               if (pool->low_count) {
+                       page = ion_page_pool_remove(pool, false);
+               } else if (high && pool->high_count) {
+                       page = ion_page_pool_remove(pool, true);
+               } else {
+                       mutex_unlock(&pool->mutex);
+                       break;
+               }
+               /* Free outside the lock; __free_pages can be slow. */
+               mutex_unlock(&pool->mutex);
+               ion_page_pool_free_pages(pool, page);
+               nr_freed += (1 << pool->order);
+       }
+
+       return nr_freed;
+}
+
+/*
+ * Create an empty pool caching 2^order page runs allocated with @gfp_mask.
+ * Returns NULL on allocation failure.
+ */
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+       struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+                                            GFP_KERNEL);
+       if (!pool)
+               return NULL;
+       pool->high_count = 0;
+       pool->low_count = 0;
+       INIT_LIST_HEAD(&pool->low_items);
+       INIT_LIST_HEAD(&pool->high_items);
+       pool->gfp_mask = gfp_mask;
+       pool->order = order;
+       mutex_init(&pool->mutex);
+       plist_node_init(&pool->list, order);
+
+       return pool;
+}
+
+/*
+ * NOTE(review): only frees the wrapper — any runs still cached in the
+ * lists leak; callers appear expected to shrink the pool to empty first.
+ */
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+       kfree(pool);
+}
+
+/* No module-level state to set up; hooks exist for symmetry only. */
+static int __init ion_page_pool_init(void)
+{
+       return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644 (file)
index 0000000..19691c0
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * drivers/gpu/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:               reference count
+ * @node:              node in the ion_device buffers tree
+ * @dev:               back pointer to the ion_device
+ * @heap:              back pointer to the heap the buffer came from
+ * @flags:             buffer specific flags
+ * @size:              size of the buffer
+ * @priv_virt:         private data to the buffer representable as
+ *                     a void *
+ * @priv_phys:         private data to the buffer representable as
+ *                     an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:              protects the buffers cnt fields
+ * @kmap_cnt:          number of times the buffer is mapped to the kernel
+ * @vaddr:             the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:          number of times the buffer is mapped for dma
+ * @sg_table:          the sg table for the buffer if dmap_cnt is not zero
+ * @pages:             flat array of pages in the buffer -- used by fault
+ *                     handler and only valid for buffers that are faulted in
+ * @vmas:              list of vma's mapping this buffer
+ * @handle_count:      count of handles referencing this buffer
+ * @task_comm:         taskcomm of last client to reference this buffer in a
+ *                     handle, used for debugging
+ * @pid:               pid of last client to reference this buffer in a
+ *                     handle, used for debugging
+*/
+struct ion_buffer {
+       struct kref ref;
+       union {
+               struct rb_node node;
+               struct list_head list;
+       };
+       struct ion_device *dev;
+       struct ion_heap *heap;
+       unsigned long flags;
+       size_t size;
+       union {
+               void *priv_virt;
+               ion_phys_addr_t priv_phys;
+       };
+       struct mutex lock;
+       int kmap_cnt;
+       void *vaddr;
+       int dmap_cnt;
+       struct sg_table *sg_table;
+       struct page **pages;
+       struct list_head vmas;
+       /* used to track orphaned buffers */
+       int handle_count;
+       char task_comm[TASK_COMM_LEN];
+       pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:          allocate memory
+ * @free:              free memory
+ * @phys               get physical address of a buffer (only define on
+ *                     physically contiguous heaps)
+ * @map_dma            map the memory for dma to a scatterlist
+ * @unmap_dma          unmap the memory for dma
+ * @map_kernel         map memory to the kernel
+ * @unmap_kernel       unmap memory to the kernel
+ * @map_user           map memory to userspace
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ */
+struct ion_heap_ops {
+       int (*allocate) (struct ion_heap *heap,
+                        struct ion_buffer *buffer, unsigned long len,
+                        unsigned long align, unsigned long flags);
+       void (*free) (struct ion_buffer *buffer);
+       int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+                    ion_phys_addr_t *addr, size_t *len);
+       struct sg_table *(*map_dma) (struct ion_heap *heap,
+                                       struct ion_buffer *buffer);
+       void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+       void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+       void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+       int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+                        struct vm_area_struct *vma);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:              rb node to put the heap on the device's tree of heaps
+ * @dev:               back pointer to the ion_device
+ * @type:              type of heap
+ * @ops:               ops struct as above
+ * @flags:             flags
+ * @id:                        id of heap, also indicates priority of this heap when
+ *                     allocating.  These are specified by platform data and
+ *                     MUST be unique
+ * @name:              used for debugging
+ * @shrinker:          a shrinker for the heap, if the heap caches system
+ *                     memory, it must define a shrinker to return it on low
+ *                     memory conditions, this includes system memory cached
+ *                     in the deferred free lists for heaps that support it
+ * @free_list:         free list head if deferred free is used
+ * @free_list_size     size of the deferred free list in bytes
+ * @lock:              protects the free list
+ * @waitqueue:         queue to wait on from deferred free thread
+ * @task:              task struct of deferred free thread
+ * @debug_show:                called when heap debug file is read to add any
+ *                     heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+       struct plist_node node;
+       struct ion_device *dev;
+       enum ion_heap_type type;
+       struct ion_heap_ops *ops;
+       unsigned long flags;
+       unsigned int id;
+       const char *name;
+       struct shrinker shrinker;
+       struct list_head free_list;
+       size_t free_list_size;
+       struct rt_mutex lock;
+       wait_queue_head_t waitqueue;
+       struct task_struct *task;
+       int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:            buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:            buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:      arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR-encoded error on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+                                    (struct ion_client *client,
+                                     unsigned int cmd,
+                                     unsigned long arg));
+
+/**
+ * ion_device_destroy - frees the device and its resources
+ * @dev:               the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:               the device
+ * @heap:              the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+                       struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:              the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:              the heap
+ * @buffer:            the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:              the heap
+ * @size:              amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:              the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+                                     unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+                      unsigned long size);
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, ie any cached mapping have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:                number of highmem items in the pool
+ * @low_count:         number of lowmem items in the pool
+ * @high_items:                list of highmem items
+ * @low_items:         list of lowmem items
+ * @shrinker:          a shrinker for the items
+ * @mutex:             lock protecting this struct and especially the count
+ *                     item list
+ * @alloc:             function to be used to allocate pages when the pool
+ *                     is empty
+ * @free:              function to be used to free pages back to the system
+ *                     when the shrinker fires
+ * @gfp_mask:          gfp_mask to use from alloc
+ * @order:             order of pages in the pool
+ * @list:              plist node for list of pools
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+       int high_count;
+       int low_count;
+       struct list_head high_items;
+       struct list_head low_items;
+       struct mutex mutex;
+       gfp_t gfp_mask;
+       unsigned int order;
+       struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:              the pool
+ * @gfp_mask:          the memory type to reclaim
+ * @nr_to_scan:                number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+                         int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:               the device the pages will be used with
+ * @page:              the first page to be flushed
+ * @size:              size in bytes of region to be flushed
+ * @dir:               direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644 (file)
index 0000000..05e9dc9
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * drivers/gpu/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+                                    __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+       int i;
+       for (i = 0; i < num_orders; i++)
+               if (order == orders[i])
+                       return i;
+       BUG();
+       return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+       return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+       struct ion_heap heap;
+       struct ion_page_pool **pools;
+};
+
+struct page_info {
+       struct page *page;
+       unsigned int order;
+       struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long order)
+{
+       bool cached = ion_buffer_cached(buffer);
+       struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+       struct page *page;
+
+       if (!cached) {
+               page = ion_page_pool_alloc(pool);
+       } else {
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (order > 4)
+                       gfp_flags = high_order_gfp_flags;
+               page = alloc_pages(gfp_flags, order);
+               if (!page)
+                       return NULL;
+               ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+                                               DMA_BIDIRECTIONAL);
+       }
+       if (!page)
+               return NULL;
+
+       return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+                            struct ion_buffer *buffer, struct page *page,
+                            unsigned int order)
+{
+       bool cached = ion_buffer_cached(buffer);
+
+       if (!cached) {
+               struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+               ion_page_pool_free(pool, page);
+       } else {
+               __free_pages(page, order);
+       }
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+                                                struct ion_buffer *buffer,
+                                                unsigned long size,
+                                                unsigned int max_order)
+{
+       struct page *page;
+       struct page_info *info;
+       int i;
+
+       for (i = 0; i < num_orders; i++) {
+               if (size < order_to_size(orders[i]))
+                       continue;
+               if (max_order < orders[i])
+                       continue;
+
+               page = alloc_buffer_page(heap, buffer, orders[i]);
+               if (!page)
+                       continue;
+
+               info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+               info->page = page;
+               info->order = orders[i];
+               return info;
+       }
+       return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+                                    struct ion_buffer *buffer,
+                                    unsigned long size, unsigned long align,
+                                    unsigned long flags)
+{
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       struct sg_table *table;
+       struct scatterlist *sg;
+       int ret;
+       struct list_head pages;
+       struct page_info *info, *tmp_info;
+       int i = 0;
+       long size_remaining = PAGE_ALIGN(size);
+       unsigned int max_order = orders[0];
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       INIT_LIST_HEAD(&pages);
+       while (size_remaining > 0) {
+               info = alloc_largest_available(sys_heap, buffer, size_remaining,
+                                               max_order);
+               if (!info)
+                       goto err;
+               list_add_tail(&info->list, &pages);
+               size_remaining -= (1 << info->order) * PAGE_SIZE;
+               max_order = info->order;
+               i++;
+       }
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table)
+               goto err;
+
+       ret = sg_alloc_table(table, i, GFP_KERNEL);
+       if (ret)
+               goto err1;
+
+       sg = table->sgl;
+       list_for_each_entry_safe(info, tmp_info, &pages, list) {
+               struct page *page = info->page;
+               sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+               sg = sg_next(sg);
+               list_del(&info->list);
+               kfree(info);
+       }
+
+       buffer->priv_virt = table;
+       return 0;
+err1:
+       kfree(table);
+err:
+       list_for_each_entry_safe(info, tmp_info, &pages, list) {
+               free_buffer_page(sys_heap, buffer, info->page, info->order);
+               kfree(info);
+       }
+       return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+       struct ion_heap *heap = buffer->heap;
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       struct sg_table *table = buffer->sg_table;
+       bool cached = ion_buffer_cached(buffer);
+       struct scatterlist *sg;
+       LIST_HEAD(pages);
+       int i;
+
+       /* uncached pages come from the page pools, zero them before returning
+          for security purposes (other allocations are zeroed at alloc time) */
+       if (!cached)
+               ion_heap_buffer_zero(buffer);
+
+       for_each_sg(table->sgl, sg, table->nents, i)
+               free_buffer_page(sys_heap, buffer, sg_page(sg),
+                               get_order(sg->length));
+       sg_free_table(table);
+       kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+                                               struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+                                     struct ion_buffer *buffer)
+{
+       return;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+       .allocate = ion_system_heap_allocate,
+       .free = ion_system_heap_free,
+       .map_dma = ion_system_heap_map_dma,
+       .unmap_dma = ion_system_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+};
+
+static int ion_system_heap_shrink(struct shrinker *shrinker,
+                                 struct shrink_control *sc) {
+
+       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+                                            shrinker);
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int nr_total = 0;
+       int nr_freed = 0;
+       int i;
+
+       if (sc->nr_to_scan == 0)
+               goto end;
+
+       /* shrink the free list first, no point in zeroing the memory if
+          we're just going to reclaim it */
+       nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
+               PAGE_SIZE;
+
+       if (nr_freed >= sc->nr_to_scan)
+               goto end;
+
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+
+               nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
+                                                sc->nr_to_scan);
+               if (nr_freed >= sc->nr_to_scan)
+                       break;
+       }
+
+end:
+       /* total number of items is whatever the page pools are holding
+          plus whatever's in the freelist */
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
+       }
+       nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+       return nr_total;
+
+}
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+                                     void *unused)
+{
+
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+                          pool->high_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->high_count);
+               seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+                          pool->low_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->low_count);
+       }
+       return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+       struct ion_system_heap *heap;
+       int i;
+
+       heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+       if (!heap)
+               return ERR_PTR(-ENOMEM);
+       heap->heap.ops = &system_heap_ops;
+       heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+                             GFP_KERNEL);
+       if (!heap->pools)
+               goto err_alloc_pools;
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool;
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+               pool = ion_page_pool_create(gfp_flags, orders[i]);
+               if (!pool)
+                       goto err_create_pool;
+               heap->pools[i] = pool;
+       }
+
+       heap->heap.shrinker.shrink = ion_system_heap_shrink;
+       heap->heap.shrinker.seeks = DEFAULT_SEEKS;
+       heap->heap.shrinker.batch = 0;
+       register_shrinker(&heap->heap.shrinker);
+       heap->heap.debug_show = ion_system_heap_debug_show;
+       return &heap->heap;
+err_create_pool:
+       for (i = 0; i < num_orders; i++)
+               if (heap->pools[i])
+                       ion_page_pool_destroy(heap->pools[i]);
+       kfree(heap->pools);
+err_alloc_pools:
+       kfree(heap);
+       return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+
+       for (i = 0; i < num_orders; i++)
+               ion_page_pool_destroy(sys_heap->pools[i]);
+       kfree(sys_heap->pools);
+       kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+                                          struct ion_buffer *buffer,
+                                          unsigned long len,
+                                          unsigned long align,
+                                          unsigned long flags)
+{
+       int order = get_order(len);
+       struct page *page;
+       struct sg_table *table;
+       unsigned long i;
+       int ret;
+
+       if (align > (PAGE_SIZE << order))
+               return -EINVAL;
+
+       page = alloc_pages(low_order_gfp_flags, order);
+       if (!page)
+               return -ENOMEM;
+
+       split_page(page, order);
+
+       len = PAGE_ALIGN(len);
+       for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+               __free_page(page + i);
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto out;
+
+       sg_set_page(table->sgl, page, len, 0);
+
+       buffer->priv_virt = table;
+
+       ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+       return 0;
+
+out:
+       for (i = 0; i < len >> PAGE_SHIFT; i++)
+               __free_page(page + i);
+       kfree(table);
+       return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+       unsigned long i;
+
+       for (i = 0; i < pages; i++)
+               __free_page(page + i);
+       sg_free_table(table);
+       kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+                                      struct ion_buffer *buffer,
+                                      ion_phys_addr_t *addr, size_t *len)
+{
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       *addr = page_to_phys(page);
+       *len = buffer->size;
+       return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+                                               struct ion_buffer *buffer)
+{
+       return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+                                            struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+       .allocate = ion_system_contig_heap_allocate,
+       .free = ion_system_contig_heap_free,
+       .phys = ion_system_contig_heap_phys,
+       .map_dma = ion_system_contig_heap_map_dma,
+       .unmap_dma = ion_system_contig_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+       struct ion_heap *heap;
+
+       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+       if (!heap)
+               return ERR_PTR(-ENOMEM);
+       heap->ops = &kmalloc_ops;
+       heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+       return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+       kfree(heap);
+}
+
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644 (file)
index 0000000..3e20349
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+       struct miscdevice misc;
+};
+
+struct ion_test_data {
+       struct dma_buf *dma_buf;
+       struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+               void __user *ptr, size_t offset, size_t size, bool write)
+{
+       int ret = 0;
+       struct dma_buf_attachment *attach;
+       struct sg_table *table;
+       pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+       enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       struct sg_page_iter sg_iter;
+       unsigned long offset_page;
+
+       attach = dma_buf_attach(dma_buf, dev);
+       if (IS_ERR(attach))
+               return PTR_ERR(attach);
+
+       table = dma_buf_map_attachment(attach, dir);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       offset_page = offset >> PAGE_SHIFT;
+       offset %= PAGE_SIZE;
+
+       for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+               struct page *page = sg_page_iter_page(&sg_iter);
+               void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+               size_t to_copy = PAGE_SIZE - offset;
+
+               to_copy = min(to_copy, size);
+               if (!vaddr) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               if (write)
+                       ret = copy_from_user(vaddr + offset, ptr, to_copy);
+               else
+                       ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+               vunmap(vaddr);
+               if (ret) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+               size -= to_copy;
+               if (!size)
+                       break;
+               ptr += to_copy;
+               offset = 0;
+       }
+
+err:
+       dma_buf_unmap_attachment(attach, table, dir);
+       dma_buf_detach(dma_buf, attach);
+       return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+               size_t offset, size_t size, bool write)
+{
+       int ret;
+       unsigned long page_offset = offset >> PAGE_SHIFT;
+       size_t copy_offset = offset % PAGE_SIZE;
+       size_t copy_size = size;
+       enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+       if (offset > dma_buf->size || size > dma_buf->size - offset)
+               return -EINVAL;
+
+       ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+       if (ret)
+               return ret;
+
+       while (copy_size > 0) {
+               size_t to_copy;
+               void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+               if (!vaddr)
+                       goto err;
+
+               to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+               if (write)
+                       ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+               else
+                       ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+               dma_buf_kunmap(dma_buf, page_offset, vaddr);
+               if (ret) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               copy_size -= to_copy;
+               ptr += to_copy;
+               page_offset++;
+               copy_offset = 0;
+       }
+err:
+       dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+       return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+                                               unsigned long arg)
+{
+       struct ion_test_data *test_data = filp->private_data;
+       int ret = 0;
+
+       union {
+               struct ion_test_rw_data test_rw;
+       } data;
+
+       if (_IOC_SIZE(cmd) > sizeof(data))
+               return -EINVAL;
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE)
+               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+
+       switch (cmd) {
+       case ION_IOC_TEST_SET_FD:
+       {
+               struct dma_buf *dma_buf = NULL;
+               int fd = arg;
+
+               if (fd >= 0) {
+                       dma_buf = dma_buf_get((int)arg);
+                       if (IS_ERR(dma_buf))
+                               return PTR_ERR(dma_buf);
+               }
+               if (test_data->dma_buf)
+                       dma_buf_put(test_data->dma_buf);
+               test_data->dma_buf = dma_buf;
+               break;
+       }
+       case ION_IOC_TEST_DMA_MAPPING:
+       {
+               ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+                                       u64_to_uptr(data.test_rw.ptr),
+                                       data.test_rw.offset, data.test_rw.size,
+                                       data.test_rw.write);
+               break;
+       }
+       case ION_IOC_TEST_KERNEL_MAPPING:
+       {
+               ret = ion_handle_test_kernel(test_data->dma_buf,
+                                       u64_to_uptr(data.test_rw.ptr),
+                                       data.test_rw.offset, data.test_rw.size,
+                                       data.test_rw.write);
+               break;
+       }
+       default:
+               return -ENOTTY;
+       }
+
+       if (_IOC_DIR(cmd) & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+                       return -EFAULT;
+       }
+       return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+       struct ion_test_data *data;
+       struct miscdevice *miscdev = file->private_data;
+
+       data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->dev = miscdev->parent;
+
+       file->private_data = data;
+
+       return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+       struct ion_test_data *data = file->private_data;
+
+       kfree(data);
+
+       return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = ion_test_ioctl,
+       .open = ion_test_open,
+       .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct ion_test_device *testdev;
+
+       testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+                               GFP_KERNEL);
+       if (!testdev)
+               return -ENOMEM;
+
+       testdev->misc.minor = MISC_DYNAMIC_MINOR;
+       testdev->misc.name = "ion-test";
+       testdev->misc.fops = &ion_test_fops;
+       testdev->misc.parent = &pdev->dev;
+       ret = misc_register(&testdev->misc);
+       if (ret) {
+               pr_err("failed to register misc device.\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, testdev);
+
+       return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+       .driver = {
+               .name = "ion-test",
+       },
+};
+
+static int __init ion_test_init(void)
+{
+       platform_device_register_simple("ion-test", -1, NULL, 0);
+       return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+       platform_driver_unregister(&ion_test_platform_driver);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644 (file)
index 0000000..11cd003
--- /dev/null
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644 (file)
index 0000000..0849600
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+struct ion_device *idev;
+struct ion_mapper *tegra_user_mapper;
+int num_heaps;
+struct ion_heap **heaps;
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+       struct ion_platform_data *pdata = pdev->dev.platform_data;
+       int err;
+       int i;
+
+       num_heaps = pdata->nr;
+
+       heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+       idev = ion_device_create(NULL);
+       if (IS_ERR_OR_NULL(idev)) {
+               kfree(heaps);
+               return PTR_ERR(idev);
+       }
+
+       /* create the heaps as specified in the board file */
+       for (i = 0; i < num_heaps; i++) {
+               struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+               heaps[i] = ion_heap_create(heap_data);
+               if (IS_ERR_OR_NULL(heaps[i])) {
+                       err = PTR_ERR(heaps[i]);
+                       goto err;
+               }
+               ion_device_add_heap(idev, heaps[i]);
+       }
+       platform_set_drvdata(pdev, idev);
+       return 0;
+err:
+       for (i = 0; i < num_heaps; i++) {
+               if (heaps[i])
+                       ion_heap_destroy(heaps[i]);
+       }
+       kfree(heaps);
+       return err;
+}
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+       struct ion_device *idev = platform_get_drvdata(pdev);
+       int i;
+
+       ion_device_destroy(idev);
+       for (i = 0; i < num_heaps; i++)
+               ion_heap_destroy(heaps[i]);
+       kfree(heaps);
+       return 0;
+}
+
+static struct platform_driver ion_driver = {
+       .probe = tegra_ion_probe,
+       .remove = tegra_ion_remove,
+       .driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+       return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+       platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
index aba25cbb0382070d284cf470939230f306c89d4f..1a50669ec8a945de62a4602fb9ac91b3bf8fc431 100644 (file)
 #define _LINUX_SW_SYNC_H
 
 #include <linux/types.h>
-
-#ifdef __KERNEL__
-
 #include <linux/kconfig.h>
 #include "sync.h"
+#include "uapi/sw_sync.h"
 
 struct sw_sync_timeline {
        struct  sync_timeline   obj;
@@ -58,19 +56,4 @@ static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
 }
 #endif /* IS_ENABLED(CONFIG_SW_SYNC) */
 
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
-       __u32   value;
-       char    name[32];
-       __s32   fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC      'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
-               struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-
 #endif /* _LINUX_SW_SYNC_H */
index 38ea986dc70f84dc72c38918f3a3b07d26e75ff4..75da9e85ac69762779200c10bdf3d766fdebd712 100644 (file)
 #define _LINUX_SYNC_H
 
 #include <linux/types.h>
-#ifdef __KERNEL__
-
 #include <linux/kref.h>
 #include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 
+#include "uapi/sync.h"
+
 struct sync_timeline;
 struct sync_pt;
 struct sync_fence;
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
  */
 int sync_fence_wait(struct sync_fence *fence, long timeout);
 
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2:       file descriptor of second fence
- * @name:      name of new fence
- * @fence:     returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
-       __s32   fd2; /* fd of second fence */
-       char    name[32]; /* name of new fence */
-       __s32   fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len:               length of sync_pt_info including any driver_data
- * @obj_name:          name of parent sync_timeline
- * @driver_name:       name of driver implmenting the parent
- * @status:            status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns:      timestamp of status change in nanoseconds
- * @driver_data:       any driver dependant data
- */
-struct sync_pt_info {
-       __u32   len;
-       char    obj_name[32];
-       char    driver_name[32];
-       __s32   status;
-       __u64   timestamp_ns;
-
-       __u8    driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len:       ioctl caller writes the size of the buffer its passing in.
- *             ioctl returns length of sync_fence_data reutnred to userspace
- *             including pt_info.
- * @name:      name of fence
- * @status:    status of fence. 1: signaled 0:active <0:error
- * @pt_info:   a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
-       __u32   len;
-       char    name[32];
-       __s32   status;
-
-       __u8    pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC         '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds.  Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT          _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data.  Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len.  On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO    _IOWR(SYNC_IOC_MAGIC, 2,\
-       struct sync_fence_info_data)
-
 #endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644 (file)
index 0000000..aa013f6
--- /dev/null
@@ -0,0 +1,62 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+       /* return code bit numbers or set alarm arg */
+       ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME,
+
+       ANDROID_ALARM_TYPE_COUNT,
+
+       /* return code bit numbers */
+       /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+       ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+       ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+       ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+       ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+                               1U << ANDROID_ALARM_ELAPSED_REALTIME,
+       ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+       ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT                  _IO('a', 1)
+
+#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4)
+
+#endif
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644 (file)
index 0000000..ba4743c
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN                256
+
+#define ASHMEM_NAME_DEF                "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED      0
+#define ASHMEM_WAS_PURGED      1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED     0
+#define ASHMEM_IS_PINNED       1
+
+struct ashmem_pin {
+       __u32 offset;   /* offset into region, in bytes, page-aligned */
+       __u32 len;      /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC            0x77
+
+#define ASHMEM_SET_NAME                _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME                _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE                _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE                _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK   _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK   _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN             _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN           _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS  _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES        _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644 (file)
index 0000000..b6cb483
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+       /* 8 bytes for large_flat_header. */
+       unsigned long           type;
+       unsigned long           flags;
+
+       /* 8 bytes of data. */
+       union {
+               void __user     *binder;        /* local object */
+               signed long     handle;         /* remote object */
+       };
+
+       /* extra data associated with local object */
+       void __user             *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+       signed long     write_size;     /* bytes to write */
+       signed long     write_consumed; /* bytes consumed by driver */
+       unsigned long   write_buffer;
+       signed long     read_size;      /* bytes to read */
+       signed long     read_consumed;  /* bytes consumed by driver */
+       unsigned long   read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+       /* driver protocol version -- increment with incompatible change */
+       signed long     protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
+#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
+#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, size_t)
+#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
+#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
+#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
+#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
+       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
+       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
+       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+        * identifying the target and contents of the transaction.
+        */
+       union {
+               size_t  handle; /* target descriptor of command transaction */
+               void    *ptr;   /* target descriptor of return transaction */
+       } target;
+       void            *cookie;        /* target object cookie */
+       unsigned int    code;           /* transaction command */
+
+       /* General information about the transaction. */
+       unsigned int    flags;
+       pid_t           sender_pid;
+       uid_t           sender_euid;
+       size_t          data_size;      /* number of bytes of data */
+       size_t          offsets_size;   /* number of bytes of offsets */
+
+       /* If this transaction is inline, the data immediately
+        * follows here; otherwise, it ends with a pointer to
+        * the data buffer.
+        */
+       union {
+               struct {
+                       /* transaction data */
+                       const void __user       *buffer;
+                       /* offsets from buffer to flat_binder_object structs */
+                       const void __user       *offsets;
+               } ptr;
+               uint8_t buf[8];
+       } data;
+};
+
+struct binder_ptr_cookie {
+       void *ptr;
+       void *cookie;
+};
+
+struct binder_pri_desc {
+       int priority;
+       int desc;
+};
+
+struct binder_pri_ptr_cookie {
+       int priority;
+       void *ptr;
+       void *cookie;
+};
+
+enum binder_driver_return_protocol {
+       BR_ERROR = _IOR('r', 0, int),
+       /*
+        * int: error code
+        */
+
+       BR_OK = _IO('r', 1),
+       /* No parameters! */
+
+       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the received command.
+        */
+
+       BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+       /*
+        * not currently supported
+        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+        * Else the remote object has acquired a primary reference.
+        */
+
+       BR_DEAD_REPLY = _IO('r', 5),
+       /*
+        * The target of the last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+        */
+
+       BR_TRANSACTION_COMPLETE = _IO('r', 6),
+       /*
+        * No parameters... always refers to the last transaction requested
+        * (including replies).  Note that this will be sent even for
+        * asynchronous transactions.
+        */
+
+       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+       /*
+        * void *:      ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+       /*
+        * not currently supported
+        * int: priority
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_NOOP = _IO('r', 12),
+       /*
+        * No parameters.  Do nothing and examine the next command.  It exists
+        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+        */
+
+       BR_SPAWN_LOOPER = _IO('r', 13),
+       /*
+        * No parameters.  The driver has determined that a process has no
+        * threads waiting to service incoming transactions.  When a process
+        * receives this command, it must spawn a new service thread and
+        * register it via bcENTER_LOOPER.
+        */
+
+       BR_FINISHED = _IO('r', 14),
+       /*
+        * not currently supported
+        * stop threadpool thread
+        */
+
+       BR_DEAD_BINDER = _IOR('r', 15, void *),
+       /*
+        * void *: cookie
+        */
+       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+       /*
+        * void *: cookie
+        */
+
+       BR_FAILED_REPLY = _IO('r', 17),
+       /*
+        * The last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+        */
+};
+
+enum binder_driver_command_protocol {
+       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the sent command.
+        */
+
+       BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+       /*
+        * not currently supported
+        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+        * Else you have acquired a primary reference on the object.
+        */
+
+       BC_FREE_BUFFER = _IOW('c', 3, int),
+       /*
+        * void *: ptr to transaction data received on a read
+        */
+
+       BC_INCREFS = _IOW('c', 4, int),
+       BC_ACQUIRE = _IOW('c', 5, int),
+       BC_RELEASE = _IOW('c', 6, int),
+       BC_DECREFS = _IOW('c', 7, int),
+       /*
+        * int: descriptor
+        */
+
+       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+       /*
+        * not currently supported
+        * int: priority
+        * int: descriptor
+        */
+
+       BC_REGISTER_LOOPER = _IO('c', 11),
+       /*
+        * No parameters.
+        * Register a spawned looper thread with the device.
+        */
+
+       BC_ENTER_LOOPER = _IO('c', 12),
+       BC_EXIT_LOOPER = _IO('c', 13),
+       /*
+        * No parameters.
+        * These two commands are sent as an application-level thread
+        * enters and exits the binder loop, respectively.  They are
+        * used so the binder can have an accurate count of the number
+        * of looping threads it has available.
+        */
+
+       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie
+        */
+
+       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie
+        */
+
+       BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+       /*
+        * void *: cookie
+        */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644 (file)
index 0000000..f09e7c1
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:       memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
+ *                              carveout heap, allocations are physically
+ *                              contiguous
+ * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
+ * @ION_NUM_HEAPS:              helper for iterating over heaps, a bit mask
+ *                              is used to identify the heaps, so only 32
+ *                              total heap types are supported
+ */
+enum ion_heap_type {
+       ION_HEAP_TYPE_SYSTEM,
+       ION_HEAP_TYPE_SYSTEM_CONTIG,
+       ION_HEAP_TYPE_CARVEOUT,
+       ION_HEAP_TYPE_CHUNK,
+       ION_HEAP_TYPE_DMA,
+       ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+                                are at the end of this enum */
+       ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK           (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK    (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK         (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS               sizeof(unsigned int) * 8
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1              /* mappings of this buffer should be
+                                          cached, ion will do cache
+                                          maintenance when the buffer is
+                                          mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2   /* mappings of this buffer will created
+                                          at mmap time, if this is set
+                                          caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:               size of the allocation
+ * @align:             required alignment of the allocation
+ * @heap_id_mask:      mask of heap ids to allocate from
+ * @flags:             flags passed to heap
+ * @handle:            pointer that will be populated with a cookie to use to 
+ *                     refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+       size_t len;
+       size_t align;
+       unsigned int heap_id_mask;
+       unsigned int flags;
+       ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:    a handle
+ * @fd:                a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+       ion_user_handle_t handle;
+       int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:    a handle
+ */
+struct ion_handle_data {
+       ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:       the custom ioctl function to call
+ * @arg:       additional data to pass to the custom ioctl, typically a user
+ *             pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+       unsigned int cmd;
+       unsigned long arg;
+};
+
+#define ION_IOC_MAGIC          'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC          _IOWR(ION_IOC_MAGIC, 0, \
+                                     struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE           _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP            _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE          _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * filed set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT         _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
+ *
+ * Deprecated in favor of using the dma_buf api's correctly (syncing
+ * will happend automatically when the buffer is mapped to a device).
+ * If necessary should be used after touching a cached buffer from the cpu,
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC           _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM         _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644 (file)
index 0000000..352379a
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read handle
+ * @ptr:       a pointer to an area at least as large as size
+ * @offset:    offset into the ion buffer to start reading
+ * @size:      size to read or write
+ * @write:     1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+       __u64 ptr;
+       __u64 offset;
+       __u64 size;
+       int write;
+};
+
+#define ION_IOC_MAGIC          'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_DMA_BUF - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+                       _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping.  Can be
+ * used by unit tests to emulate a DMA engine as close as possible.  Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+                       _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping.  Can be
+ * used by unit tests to test heap map_kernel functions.  Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+                       _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644 (file)
index 0000000..9b5d486
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+       __u32   value;
+       char    name[32];
+       __s32   fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC      'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+               struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644 (file)
index 0000000..57fdaad
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:       file descriptor of second fence
+ * @name:      name of new fence
+ * @fence:     returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+       __s32   fd2; /* fd of second fence */
+       char    name[32]; /* name of new fence */
+       __s32   fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:               length of sync_pt_info including any driver_data
+ * @obj_name:          name of parent sync_timeline
+ * @driver_name:       name of driver implmenting the parent
+ * @status:            status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:      timestamp of status change in nanoseconds
+ * @driver_data:       any driver dependant data
+ */
+struct sync_pt_info {
+       __u32   len;
+       char    obj_name[32];
+       char    driver_name[32];
+       __s32   status;
+       __u64   timestamp_ns;
+
+       __u8    driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:       ioctl caller writes the size of the buffer its passing in.
+ *             ioctl returns length of sync_fence_data reutnred to userspace
+ *             including pt_info.
+ * @name:      name of fence
+ * @status:    status of fence. 1: signaled 0:active <0:error
+ * @pt_info:   a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+       __u32   len;
+       char    name[32];
+       __s32   status;
+
+       __u8    pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC         '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * pass timeout in milliseconds.  Waits indefinitely timeout < 0.
+ */
+#define SYNC_IOC_WAIT          _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE         _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO    _IOWR(SYNC_IOC_MAGIC, 2,\
+       struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
index 12fb818ab147c60a83786f51e87287dd64e87b08..960d64fbd40beda6683006b49d86d2a03a6b6191 100644 (file)
@@ -269,6 +269,17 @@ struct mtp_device_status {
        __le16  wCode;
 };
 
+struct mtp_data_header {
+       /* length of packet, including this header */
+       __le32  length;
+       /* container type (2 for data packet) */
+       __le16  type;
+       /* MTP command code */
+       __le16  command;
+       /* MTP transaction ID */
+       __le32  transaction_id;
+};
+
 /* temporary variable used between mtp_open() and mtp_gadget_bind() */
 static struct mtp_dev *_mtp_dev;
 
index bff23d33d66f777483325564a5b85f3e6b16291a..a3279c7def71b948fe68094a7fea161bba09123a 100644 (file)
@@ -23,8 +23,6 @@ source "drivers/gpu/drm/Kconfig"
 
 source "drivers/gpu/host1x/Kconfig"
 
-source "drivers/gpu/ion/Kconfig"
-
 config VGASTATE
        tristate
        default n
index fd5bcde87850f5bf13172edcb5fab75c31d633c3..933e74ac809835850245831d7314153bad9d2cc9 100644 (file)
@@ -37,6 +37,8 @@
 #define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
 #define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
 
+static DEFINE_IDR(adf_devices);
+
 static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
 {
        /* sync_fence_wait() dumps debug information on timeout.  Experience
@@ -455,23 +457,20 @@ static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
                struct idr *idr, struct adf_device *parent,
                const struct adf_obj_ops *ops, const char *fmt, va_list args)
 {
+       int ret;
+
        if (ops && ops->supports_event && !ops->set_event) {
                pr_err("%s: %s implements supports_event but not set_event\n",
                                __func__, adf_obj_type_str(type));
                return -EINVAL;
        }
 
-       if (idr) {
-               int ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
-               if (ret < 0) {
-                       pr_err("%s: allocating object id failed: %d\n",
-                                       __func__, ret);
-                       return ret;
-               }
-               obj->id = ret;
-       } else {
-               obj->id = -1;
+       ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+       if (ret < 0) {
+               pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+               return ret;
        }
+       obj->id = ret;
 
        vscnprintf(obj->name, sizeof(obj->name), fmt, args);
 
@@ -498,8 +497,7 @@ static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
        }
 
        mutex_destroy(&obj->event_lock);
-       if (idr)
-               idr_remove(idr, obj->id);
+       idr_remove(idr, obj->id);
 }
 
 /**
@@ -543,8 +541,8 @@ int adf_device_init(struct adf_device *dev, struct device *parent,
        memset(dev, 0, sizeof(*dev));
 
        va_start(args, fmt);
-       ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, NULL, dev, &ops->base,
-                       fmt, args);
+       ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+                       &ops->base, fmt, args);
        va_end(args);
        if (ret < 0)
                return ret;
@@ -612,7 +610,7 @@ void adf_device_destroy(struct adf_device *dev)
        }
        mutex_destroy(&dev->post_lock);
        mutex_destroy(&dev->client_lock);
-       adf_obj_destroy(&dev->base, NULL);
+       adf_obj_destroy(&dev->base, &adf_devices);
 }
 EXPORT_SYMBOL(adf_device_destroy);
 
index e4a792135072a53d676d7b9b4549eb8f0e7db253..bba873d34bbb3dd2c8b055e8c84f207893a42304 100644 (file)
@@ -49,6 +49,9 @@ int adf_interface_blank(struct adf_interface *intf, u8 state)
        if (!intf->ops || !intf->ops->blank)
                return -EOPNOTSUPP;
 
+       if (state > DRM_MODE_DPMS_OFF)
+               return -EINVAL;
+
        mutex_lock(&dev->client_lock);
        if (state != DRM_MODE_DPMS_ON)
                flush_kthread_worker(&dev->post_worker);
index 477abd63ccc240f5081d23434297ed464fb99b1a..cac34d14cbc2f30669c1c501325ab1fe880fda8d 100644 (file)
@@ -519,10 +519,10 @@ int adf_fbdev_blank(int blank, struct fb_info *info)
                dpms_state = DRM_MODE_DPMS_STANDBY;
                break;
        case FB_BLANK_VSYNC_SUSPEND:
-               dpms_state = DRM_MODE_DPMS_STANDBY;
+               dpms_state = DRM_MODE_DPMS_SUSPEND;
                break;
        case FB_BLANK_HSYNC_SUSPEND:
-               dpms_state = DRM_MODE_DPMS_SUSPEND;
+               dpms_state = DRM_MODE_DPMS_STANDBY;
                break;
        case FB_BLANK_POWERDOWN:
                dpms_state = DRM_MODE_DPMS_OFF;
index 076ccbd0cd888fca4d7a210489426f6f493a2c67..8c659c71ffa88b1686d48eb14ac315a624f056e9 100644 (file)
@@ -105,11 +105,6 @@ static struct device_attribute adf_interface_attrs[] = {
        __ATTR_RO(vsync_timestamp),
 };
 
-static char *adf_devnode(struct device *dev, umode_t *mode)
-{
-       return kasprintf(GFP_KERNEL, "adf/%s", dev_name(dev));
-}
-
 int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
 {
        int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
@@ -142,7 +137,7 @@ static char *adf_device_devnode(struct device *dev, umode_t *mode,
                kuid_t *uid, kgid_t *gid)
 {
        struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
-       return kasprintf(GFP_KERNEL, "adf/%s/device", obj->name);
+       return kasprintf(GFP_KERNEL, "adf%d", obj->id);
 }
 
 static char *adf_interface_devnode(struct device *dev, umode_t *mode,
@@ -151,8 +146,8 @@ static char *adf_interface_devnode(struct device *dev, umode_t *mode,
        struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
        struct adf_interface *intf = adf_obj_to_interface(obj);
        struct adf_device *parent = adf_interface_parent(intf);
-       return kasprintf(GFP_KERNEL, "adf/%s/interface%d",
-                       parent->base.name, intf->base.id);
+       return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+                       parent->base.id, intf->base.id);
 }
 
 static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
@@ -161,8 +156,8 @@ static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
        struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
        struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
        struct adf_device *parent = adf_overlay_engine_parent(eng);
-       return kasprintf(GFP_KERNEL, "adf/%s/overlay-engine%d",
-                       parent->base.name, eng->base.id);
+       return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+                       parent->base.id, eng->base.id);
 }
 
 static void adf_noop_release(struct device *dev)
@@ -285,7 +280,6 @@ int adf_sysfs_init(void)
                goto err_chrdev;
        }
 
-       class->devnode = adf_devnode;
        adf_class = class;
        adf_major = ret;
        return 0;
index c06bd6c8ba26d51252ef81b509e737e0e3b88934..e40aa1075a30e3f52d050268320edffec2a0e15f 100644 (file)
 #ifndef __LINUX_IF_PPPOLAC_H
 #define __LINUX_IF_PPPOLAC_H
 
-#include <linux/socket.h>
-#include <linux/types.h>
-
-struct sockaddr_pppolac {
-       sa_family_t     sa_family;      /* AF_PPPOX */
-       unsigned int    sa_protocol;    /* PX_PROTO_OLAC */
-       int             udp_socket;
-       struct __attribute__((packed)) {
-               __u16   tunnel, session;
-       } local, remote;
-} __attribute__((packed));
+#include <uapi/linux/if_pppolac.h>
 
 #endif /* __LINUX_IF_PPPOLAC_H */
index 0cf34b4d551fed8857fa35615022a4284d294f39..4ac621a9ce7c1d5db7635c6732d1a68fef38658c 100644 (file)
 #ifndef __LINUX_IF_PPPOPNS_H
 #define __LINUX_IF_PPPOPNS_H
 
-#include <linux/socket.h>
-#include <linux/types.h>
-
-struct sockaddr_pppopns {
-       sa_family_t     sa_family;      /* AF_PPPOX */
-       unsigned int    sa_protocol;    /* PX_PROTO_OPNS */
-       int             tcp_socket;
-       __u16           local;
-       __u16           remote;
-} __attribute__((packed));
+#include <uapi/linux/if_pppopns.h>
 
 #endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/ion.h b/include/linux/ion.h
deleted file mode 100644 (file)
index 5771f8c..0000000
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * include/linux/ion.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_ION_H
-#define _LINUX_ION_H
-
-#include <linux/types.h>
-
-typedef int ion_user_handle_t;
-
-/**
- * enum ion_heap_types - list of all possible types of heaps
- * @ION_HEAP_TYPE_SYSTEM:       memory allocated via vmalloc
- * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
- * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
- *                              carveout heap, allocations are physically
- *                              contiguous
- * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
- * @ION_NUM_HEAPS:              helper for iterating over heaps, a bit mask
- *                              is used to identify the heaps, so only 32
- *                              total heap types are supported
- */
-enum ion_heap_type {
-       ION_HEAP_TYPE_SYSTEM,
-       ION_HEAP_TYPE_SYSTEM_CONTIG,
-       ION_HEAP_TYPE_CARVEOUT,
-       ION_HEAP_TYPE_CHUNK,
-       ION_HEAP_TYPE_DMA,
-       ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
-                                are at the end of this enum */
-       ION_NUM_HEAPS = 16,
-};
-
-#define ION_HEAP_SYSTEM_MASK           (1 << ION_HEAP_TYPE_SYSTEM)
-#define ION_HEAP_SYSTEM_CONTIG_MASK    (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
-#define ION_HEAP_CARVEOUT_MASK         (1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
-
-#define ION_NUM_HEAP_IDS               sizeof(unsigned int) * 8
-
-/**
- * allocation flags - the lower 16 bits are used by core ion, the upper 16
- * bits are reserved for use by the heaps themselves.
- */
-#define ION_FLAG_CACHED 1              /* mappings of this buffer should be
-                                          cached, ion will do cache
-                                          maintenance when the buffer is
-                                          mapped for dma */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2   /* mappings of this buffer will created
-                                          at mmap time, if this is set
-                                          caches must be managed manually */
-
-#ifdef __KERNEL__
-struct ion_handle;
-struct ion_device;
-struct ion_heap;
-struct ion_mapper;
-struct ion_client;
-struct ion_buffer;
-
-/* This should be removed some day when phys_addr_t's are fully
-   plumbed in the kernel, and all instances of ion_phys_addr_t should
-   be converted to phys_addr_t.  For the time being many kernel interfaces
-   do not accept phys_addr_t's that would have to */
-#define ion_phys_addr_t unsigned long
-
-/**
- * struct ion_platform_heap - defines a heap in the given platform
- * @type:      type of the heap from ion_heap_type enum
- * @id:                unique identifier for heap.  When allocating higher numbers
- *             will be allocated from first.  At allocation these are passed
- *             as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
- * @name:      used for debug purposes
- * @base:      base address of heap in physical memory if applicable
- * @size:      size of the heap in bytes if applicable
- * @align:     required alignment in physical memory if applicable
- * @priv:      private info passed from the board file
- *
- * Provided by the board file.
- */
-struct ion_platform_heap {
-       enum ion_heap_type type;
-       unsigned int id;
-       const char *name;
-       ion_phys_addr_t base;
-       size_t size;
-       ion_phys_addr_t align;
-       void *priv;
-};
-
-/**
- * struct ion_platform_data - array of platform heaps passed from board file
- * @nr:                number of structures in the array
- * @heaps:     array of platform_heap structions
- *
- * Provided by the board file in the form of platform data to a platform device.
- */
-struct ion_platform_data {
-       int nr;
-       struct ion_platform_heap *heaps;
-};
-
-/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data:      platform data specifying starting physical address and
- *             size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specfic sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
-/**
- * ion_client_create() -  allocate a client and returns it
- * @dev:               the global ion device
- * @heap_type_mask:    mask of heaps this client can allocate from
- * @name:              used for debugging
- */
-struct ion_client *ion_client_create(struct ion_device *dev,
-                                    const char *name);
-
-/**
- * ion_client_destroy() -  free's a client and all it's handles
- * @client:    the client
- *
- * Free the provided client and all it's resources including
- * any handles it is holding.
- */
-void ion_client_destroy(struct ion_client *client);
-
-/**
- * ion_alloc - allocate ion memory
- * @client:            the client
- * @len:               size of the allocation
- * @align:             requested allocation alignment, lots of hardware blocks
- *                     have alignment requirements of some kind
- * @heap_id_mask:      mask of heaps to allocate from, if multiple bits are set
- *                     heaps will be tried in order from highest to lowest
- *                     id
- * @flags:             heap flags, the low 16 bits are consumed by ion, the
- *                     high 16 bits are passed on to the respective heap and
- *                     can be heap custom
- *
- * Allocate memory in one of the heaps provided in heap mask and return
- * an opaque handle to it.
- */
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-                            size_t align, unsigned int heap_id_mask,
-                            unsigned int flags);
-
-/**
- * ion_free - free a handle
- * @client:    the client
- * @handle:    the handle to free
- *
- * Free the provided handle.
- */
-void ion_free(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_phys - returns the physical address and len of a handle
- * @client:    the client
- * @handle:    the handle
- * @addr:      a pointer to put the address in
- * @len:       a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address.  It't output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead.  Returns -EINVAL if the handle is invalid.  This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-            ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_map_dma - return an sg_table describing a handle
- * @client:    the client
- * @handle:    the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
-                             struct ion_handle *handle);
-
-/**
- * ion_map_kernel - create mapping for the given handle
- * @client:    the client
- * @handle:    handle to map
- *
- * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
- */
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_unmap_kernel() - destroy a kernel mapping for a handle
- * @client:    the client
- * @handle:    handle to unmap
- */
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_share_dma_buf() - share buffer as dma-buf
- * @client:    the client
- * @handle:    the handle
- */
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-                                               struct ion_handle *handle);
-
-/**
- * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
- * @client:    the client
- * @handle:    the handle
- */
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
-
-/**
- * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
- * @client:    the client
- * @fd:                the dma-buf fd
- *
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
- * import that fd and return a handle representing it.  If a dma-buf from
- * another exporter is passed in this function will return ERR_PTR(-EINVAL)
- */
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
-
-#endif /* __KERNEL__ */
-
-/**
- * DOC: Ion Userspace API
- *
- * create a client by opening /dev/ion
- * most operations handled via following ioctls
- *
- */
-
-/**
- * struct ion_allocation_data - metadata passed from userspace for allocations
- * @len:               size of the allocation
- * @align:             required alignment of the allocation
- * @heap_id_mask:      mask of heap ids to allocate from
- * @flags:             flags passed to heap
- * @handle:            pointer that will be populated with a cookie to use to 
- *                     refer to this allocation
- *
- * Provided by userspace as an argument to the ioctl
- */
-struct ion_allocation_data {
-       size_t len;
-       size_t align;
-       unsigned int heap_id_mask;
-       unsigned int flags;
-       ion_user_handle_t handle;
-};
-
-/**
- * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
- * @handle:    a handle
- * @fd:                a file descriptor representing that handle
- *
- * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
- * the handle returned from ion alloc, and the kernel returns the file
- * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
- * provides the file descriptor and the kernel returns the handle.
- */
-struct ion_fd_data {
-       ion_user_handle_t handle;
-       int fd;
-};
-
-/**
- * struct ion_handle_data - a handle passed to/from the kernel
- * @handle:    a handle
- */
-struct ion_handle_data {
-       ion_user_handle_t handle;
-};
-
-/**
- * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
- * @cmd:       the custom ioctl function to call
- * @arg:       additional data to pass to the custom ioctl, typically a user
- *             pointer to a predefined structure
- *
- * This works just like the regular cmd and arg fields of an ioctl.
- */
-struct ion_custom_data {
-       unsigned int cmd;
-       unsigned long arg;
-};
-
-#define ION_IOC_MAGIC          'I'
-
-/**
- * DOC: ION_IOC_ALLOC - allocate memory
- *
- * Takes an ion_allocation_data struct and returns it with the handle field
- * populated with the opaque handle for the allocation.
- */
-#define ION_IOC_ALLOC          _IOWR(ION_IOC_MAGIC, 0, \
-                                     struct ion_allocation_data)
-
-/**
- * DOC: ION_IOC_FREE - free memory
- *
- * Takes an ion_handle_data struct and frees the handle.
- */
-#define ION_IOC_FREE           _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
-
-/**
- * DOC: ION_IOC_MAP - get a file descriptor to mmap
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle.  Returns the struct with the fd field set to a file
- * descriptor open in the current address space.  This file descriptor
- * can then be used as an argument to mmap.
- */
-#define ION_IOC_MAP            _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle.  Returns the struct with the fd field set to a file
- * descriptor open in the current address space.  This file descriptor
- * can then be passed to another process.  The corresponding opaque handle can
- * be retrieved via ION_IOC_IMPORT.
- */
-#define ION_IOC_SHARE          _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_IMPORT - imports a shared file descriptor
- *
- * Takes an ion_fd_data struct with the fd field populated with a valid file
- * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
- * filed set to the corresponding opaque handle.
- */
-#define ION_IOC_IMPORT         _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
- *
- * Deprecated in favor of using the dma_buf api's correctly (syncing
- * will happend automatically when the buffer is mapped to a device).
- * If necessary should be used after touching a cached buffer from the cpu,
- * this will make the buffer in memory coherent.
- */
-#define ION_IOC_SYNC           _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
- *
- * Takes the argument of the architecture specific ioctl to call and
- * passes appropriate userdata for that ioctl
- */
-#define ION_IOC_CUSTOM         _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
-
-#endif /* _LINUX_ION_H */
index 856a5850217b24442901f2f1c8799bf158b9d007..08cf5402102ca2a0bb7b8bc7ef1c89ae3430969f 100644 (file)
 #ifndef __LINUX_KEYCHORD_H_
 #define __LINUX_KEYCHORD_H_
 
-#include <linux/input.h>
-
-#define KEYCHORD_VERSION               1
-
-/*
- * One or more input_keychord structs are written to /dev/keychord
- * at once to specify the list of keychords to monitor.
- * Reading /dev/keychord returns the id of a keychord when the
- * keychord combination is pressed.  A keychord is signalled when
- * all of the keys in the keycode list are in the pressed state.
- * The order in which the keys are pressed does not matter.
- * The keychord will not be signalled if keys not in the keycode
- * list are pressed.
- * Keychords will not be signalled on key release events.
- */
-struct input_keychord {
-       /* should be KEYCHORD_VERSION */
-       __u16 version;
-       /*
-        * client specified ID, returned from read()
-        * when this keychord is pressed.
-        */
-       __u16 id;
-
-       /* number of keycodes in this keychord */
-       __u16 count;
-
-       /* variable length array of keycodes */
-       __u16 keycodes[];
-};
+#include <uapi/linux/keychord.h>
 
 #endif /* __LINUX_KEYCHORD_H_ */
index 61ebe0aabc5ba44beca1eae91e9ac774c99ca77d..ebe3c4d59309842b1b92b1d30d04264e02f0300b 100644 (file)
 #ifndef __LINUX_USB_F_ACCESSORY_H
 #define __LINUX_USB_F_ACCESSORY_H
 
-/* Use Google Vendor ID when in accessory mode */
-#define USB_ACCESSORY_VENDOR_ID 0x18D1
-
-
-/* Product ID to use when in accessory mode */
-#define USB_ACCESSORY_PRODUCT_ID 0x2D00
-
-/* Product ID to use when in accessory mode and adb is enabled */
-#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
-
-/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
-#define ACCESSORY_STRING_MANUFACTURER   0
-#define ACCESSORY_STRING_MODEL          1
-#define ACCESSORY_STRING_DESCRIPTION    2
-#define ACCESSORY_STRING_VERSION        3
-#define ACCESSORY_STRING_URI            4
-#define ACCESSORY_STRING_SERIAL         5
-
-/* Control request for retrieving device's protocol version
- *
- *     requestType:    USB_DIR_IN | USB_TYPE_VENDOR
- *     request:        ACCESSORY_GET_PROTOCOL
- *     value:          0
- *     index:          0
- *     data            version number (16 bits little endian)
- *                     1 for original accessory support
- *                     2 adds HID and device to host audio support
- */
-#define ACCESSORY_GET_PROTOCOL  51
-
-/* Control request for host to send a string to the device
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_SEND_STRING
- *     value:          0
- *     index:          string ID
- *     data            zero terminated UTF8 string
- *
- *  The device can later retrieve these strings via the
- *  ACCESSORY_GET_STRING_* ioctls
- */
-#define ACCESSORY_SEND_STRING   52
-
-/* Control request for starting device in accessory mode.
- * The host sends this after setting all its strings to the device.
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_START
- *     value:          0
- *     index:          0
- *     data            none
- */
-#define ACCESSORY_START         53
-
-/* Control request for registering a HID device.
- * Upon registering, a unique ID is sent by the accessory in the
- * value parameter. This ID will be used for future commands for
- * the device
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_REGISTER_HID_DEVICE
- *     value:          Accessory assigned ID for the HID device
- *     index:          total length of the HID report descriptor
- *     data            none
- */
-#define ACCESSORY_REGISTER_HID         54
-
-/* Control request for unregistering a HID device.
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_REGISTER_HID
- *     value:          Accessory assigned ID for the HID device
- *     index:          0
- *     data            none
- */
-#define ACCESSORY_UNREGISTER_HID         55
-
-/* Control request for sending the HID report descriptor.
- * If the HID descriptor is longer than the endpoint zero max packet size,
- * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
- * commands. The data for the descriptor must be sent sequentially
- * if multiple packets are needed.
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_SET_HID_REPORT_DESC
- *     value:          Accessory assigned ID for the HID device
- *     index:          offset of data in descriptor
- *                      (needed when HID descriptor is too big for one packet)
- *     data            the HID report descriptor
- */
-#define ACCESSORY_SET_HID_REPORT_DESC         56
-
-/* Control request for sending HID events.
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_SEND_HID_EVENT
- *     value:          Accessory assigned ID for the HID device
- *     index:          0
- *     data            the HID report for the event
- */
-#define ACCESSORY_SEND_HID_EVENT         57
-
-/* Control request for setting the audio mode.
- *
- *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
- *     request:        ACCESSORY_SET_AUDIO_MODE
- *     value:          0 - no audio
- *                     1 - device to host, 44100 16-bit stereo PCM
- *     index:          0
- *     data            none
- */
-#define ACCESSORY_SET_AUDIO_MODE         58
-
-/* ioctls for retrieving strings set by the host */
-#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
-#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
-#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
-#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
-#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
-#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
-/* returns 1 if there is a start request pending */
-#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
-/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
-#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+#include <uapi/linux/usb/f_accessory.h>
 
 #endif /* __LINUX_USB_F_ACCESSORY_H */
index 72a432e2fcdd66ca0cd89b7fc84e174c4c38c3b4..4e8417791bea47c498939a81334f043a980b0a4f 100644 (file)
 #ifndef __LINUX_USB_F_MTP_H
 #define __LINUX_USB_F_MTP_H
 
-#include <linux/ioctl.h>
-
-#ifdef __KERNEL__
-
-struct mtp_data_header {
-       /* length of packet, including this header */
-       uint32_t        length;
-       /* container type (2 for data packet) */
-       uint16_t        type;
-       /* MTP command code */
-       uint16_t    command;
-       /* MTP transaction ID */
-       uint32_t        transaction_id;
-};
-
-#endif /* __KERNEL__ */
-
-struct mtp_file_range {
-       /* file descriptor for file to transfer */
-       int                     fd;
-       /* offset in file for start of transfer */
-       loff_t          offset;
-       /* number of bytes to transfer */
-       int64_t         length;
-       /* MTP command ID for data header,
-        * used only for MTP_SEND_FILE_WITH_HEADER
-        */
-       uint16_t        command;
-       /* MTP transaction ID for data header,
-        * used only for MTP_SEND_FILE_WITH_HEADER
-        */
-       uint32_t        transaction_id;
-};
-
-struct mtp_event {
-       /* size of the event */
-       size_t          length;
-       /* event data to send */
-       void            *data;
-};
-
-/* Sends the specified file range to the host */
-#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
-/* Receives data from the host and writes it to a file.
- * The file is created if it does not exist.
- */
-#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
-/* Sends an event to the host via the interrupt endpoint */
-#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
-/* Sends the specified file range to the host,
- * with a 12 byte MTP data packet header at the beginning.
- */
-#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+#include <uapi/linux/usb/f_mtp.h>
 
 #endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/uapi/linux/if_pppolac.h b/include/uapi/linux/if_pppolac.h
new file mode 100644 (file)
index 0000000..b7eb815
--- /dev/null
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OLAC */
+       int             udp_socket;
+       struct __attribute__((packed)) {
+               __u16   tunnel, session;
+       } local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
diff --git a/include/uapi/linux/if_pppopns.h b/include/uapi/linux/if_pppopns.h
new file mode 100644 (file)
index 0000000..a392b52
--- /dev/null
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+       sa_family_t     sa_family;      /* AF_PPPOX */
+       unsigned int    sa_protocol;    /* PX_PROTO_OPNS */
+       int             tcp_socket;
+       __u16           local;
+       __u16           remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
new file mode 100644 (file)
index 0000000..ea7cf4d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _UAPI_LINUX_KEYCHORD_H_
+#define _UAPI_LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION               1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+       /* should be KEYCHORD_VERSION */
+       __u16 version;
+       /*
+        * client specified ID, returned from read()
+        * when this keychord is pressed.
+        */
+       __u16 id;
+
+       /* number of keycodes in this keychord */
+       __u16 count;
+
+       /* variable length array of keycodes */
+       __u16 keycodes[];
+};
+
+#endif /* _UAPI_LINUX_KEYCHORD_H_ */
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644 (file)
index 0000000..0baeb7d
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *     requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_GET_PROTOCOL
+ *     value:          0
+ *     index:          0
+ *     data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_STRING
+ *     value:          0
+ *     index:          string ID
+ *     data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_START
+ *     value:          0
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID_DEVICE
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          total length of the HID report descriptor
+ *     data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_REGISTER_HID
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_HID_REPORT_DESC
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *     data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SEND_HID_EVENT
+ *     value:          Accessory assigned ID for the HID device
+ *     index:          0
+ *     data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *     requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *     request:        ACCESSORY_SET_AUDIO_MODE
+ *     value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *     index:          0
+ *     data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644 (file)
index 0000000..5032918
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+       /* file descriptor for file to transfer */
+       int                     fd;
+       /* offset in file for start of transfer */
+       loff_t          offset;
+       /* number of bytes to transfer */
+       int64_t         length;
+       /* MTP command ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint16_t        command;
+       /* MTP transaction ID for data header,
+        * used only for MTP_SEND_FILE_WITH_HEADER
+        */
+       uint32_t        transaction_id;
+};
+
+struct mtp_event {
+       /* size of the event */
+       size_t          length;
+       /* event data to send */
+       void            *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
index 2ba345ca458b15d8f19c59cf29df40421bd66570..38458f6428b5a1662b4767b0975667e7472d2d67 100644 (file)
@@ -22,7 +22,7 @@
 #include <drm/drm_mode.h>
 
 #define ADF_NAME_LEN 32
-#define ADF_MAX_CUSTOM_DATA_SIZE PAGE_SIZE
+#define ADF_MAX_CUSTOM_DATA_SIZE 4096
 
 enum adf_interface_type {
        ADF_INTF_DSI = 0,
@@ -126,7 +126,7 @@ struct adf_buffer_config {
 
        __s64 acquire_fence;
 };
-#define ADF_MAX_BUFFERS (PAGE_SIZE / sizeof(struct adf_buffer_config))
+#define ADF_MAX_BUFFERS (4096 / sizeof(struct adf_buffer_config))
 
 /**
  * struct adf_post_config - request to flip to a new set of buffers
@@ -152,7 +152,7 @@ struct adf_post_config {
 
        __s64 complete_fence;
 };
-#define ADF_MAX_INTERFACES (PAGE_SIZE / sizeof(__u32))
+#define ADF_MAX_INTERFACES (4096 / sizeof(__u32))
 
 /**
  * struct adf_simple_buffer_allocate - request to allocate a "simple" buffer
@@ -233,7 +233,7 @@ struct adf_device_data {
        size_t custom_data_size;
        void __user *custom_data;
 };
-#define ADF_MAX_ATTACHMENTS (PAGE_SIZE / sizeof(struct adf_attachment))
+#define ADF_MAX_ATTACHMENTS (4096 / sizeof(struct adf_attachment_config))
 
 /**
  * struct adf_device_data - describes a display interface
@@ -273,7 +273,7 @@ struct adf_interface_data {
        size_t custom_data_size;
        void __user *custom_data;
 };
-#define ADF_MAX_MODES (PAGE_SIZE / sizeof(struct drm_mode_modeinfo))
+#define ADF_MAX_MODES (4096 / sizeof(struct drm_mode_modeinfo))
 
 /**
  * struct adf_overlay_engine_data - describes an overlay engine
@@ -293,7 +293,7 @@ struct adf_overlay_engine_data {
        size_t custom_data_size;
        void __user *custom_data;
 };
-#define ADF_MAX_SUPPORTED_FORMATS (PAGE_SIZE / sizeof(__u32))
+#define ADF_MAX_SUPPORTED_FORMATS (4096 / sizeof(__u32))
 
 #define ADF_SET_EVENT          _IOW('D', 0, struct adf_set_event)
 #define ADF_BLANK              _IOW('D', 1, __u8)
index 1c9090bc674607cd8a8276f996e241e4457757d1..126b7c939d1f76fdb4c6fe60a79f71da873ab307 100644 (file)
@@ -2101,7 +2101,7 @@ static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
 }
 #endif
 
-
+#ifdef CONFIG_MMU
 static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
                struct vm_area_struct **prev,
                unsigned long start, unsigned long end,
@@ -2240,6 +2240,13 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
 
        return error;
 }
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+               unsigned long len_in, unsigned long arg)
+{
+       return -EINVAL;
+}
+#endif
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                unsigned long, arg4, unsigned long, arg5)
@@ -2368,9 +2375,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        else
                                return -EINVAL;
                        break;
-               case PR_SET_VMA:
-                       error = prctl_set_vma(arg2, arg3, arg4, arg5);
-                       break;
                default:
                        return -EINVAL;
                }
@@ -2407,6 +2411,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                if (arg2 || arg3 || arg4 || arg5)
                        return -EINVAL;
                return current->no_new_privs ? 1 : 0;
+       case PR_SET_VMA:
+               error = prctl_set_vma(arg2, arg3, arg4, arg5);
+               break;
        default:
                error = -EINVAL;
                break;
index 0b34b3fd09408b747c5351276147bdc9b7fa4030..28fb17845f7000168f26540b6bc7aa55388e9031 100644 (file)
@@ -728,7 +728,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                  vma->anon_vma, vma->vm_file, pgoff,
-                                 new_pol, vma_get_anon_name(name));
+                                 new_pol, vma_get_anon_name(vma));
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
index e476b88f9d68eb030da0ea6f115d726cf2b4c450..4a16829969a68b9560f0c63ff6ef175886683458 100644 (file)
@@ -1496,7 +1496,7 @@ static const struct file_operations proc_iface_stat_fmt_fops = {
        .open           = proc_iface_stat_fmt_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = seq_release_private,
 };
 
 static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
@@ -2904,7 +2904,7 @@ static const struct file_operations proc_qtaguid_ctrl_fops = {
        .read           = seq_read,
        .write          = qtaguid_ctrl_proc_write,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = seq_release_private,
 };
 
 static const struct seq_operations proc_qtaguid_stats_seqops = {